Use locals instead of struct fields; NFC

This is one of a series of changes intended to make
https://reviews.llvm.org/D44748 more easily reviewable. Please see that
patch for more context.

Since I was asked to do all of this with post-commit review, this is
about as small as I can make this patch (short of committing the changes
to these few files separately; they're incredibly similar in spirit, so
I've kept them together).

On its own, this change doesn't make a great deal of sense. I plan on
having a follow-up Real Soon Now(TM) to make the bits here make more
sense. :)

In particular, the next change in this series is meant to make
LocationSize an actual type, which you have to call .getValue() on in
order to get at the uint64_t inside. Hence, this change refactors code
so that:
- we only need to call the soon-to-come getValue() once in most cases,
  and
- said call to getValue() happens very close to a piece of code that
  checks whether the LocationSize has a value (e.g. if it's != UnknownSize).
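
To illustrate the shape this sets up (a minimal sketch, not code from this
commit; getValue() is the accessor described above, and hasValue() is an
assumed companion to it):

  // Before this series: LocationSize is effectively a uint64_t, so the
  // parameter can be compared and used anywhere in the function.
  if (MaybeSize == MemoryLocation::UnknownSize)
    return MayAlias;
  const uint64_t Size = MaybeSize; // implicit conversion

  // After LocationSize becomes a real type: the known-size check and the
  // single conversion to uint64_t sit right next to each other.
  if (!MaybeSize.hasValue())
    return MayAlias;
  const uint64_t Size = MaybeSize.getValue();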

llvm-svn: 344012
commit fefc42c9bb (parent 40a64c4c08)
Author: George Burgess IV
Date:   2018-10-09 02:14:33 +00:00

3 changed files with 53 additions and 38 deletions

lib/Analysis/BasicAliasAnalysis.cpp

@@ -1001,9 +1001,9 @@ ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
 /// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
 /// both having the exact same pointer operand.
 static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
-                                            LocationSize V1Size,
+                                            LocationSize MaybeV1Size,
                                             const GEPOperator *GEP2,
-                                            LocationSize V2Size,
+                                            LocationSize MaybeV2Size,
                                             const DataLayout &DL) {
   assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
          GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
@@ -1019,10 +1019,13 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
 
   // If we don't know the size of the accesses through both GEPs, we can't
   // determine whether the struct fields accessed can't alias.
-  if (V1Size == MemoryLocation::UnknownSize ||
-      V2Size == MemoryLocation::UnknownSize)
+  if (MaybeV1Size == MemoryLocation::UnknownSize ||
+      MaybeV2Size == MemoryLocation::UnknownSize)
     return MayAlias;
 
+  const uint64_t V1Size = MaybeV1Size;
+  const uint64_t V2Size = MaybeV2Size;
+
   ConstantInt *C1 =
       dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
   ConstantInt *C2 =
@@ -1179,11 +1182,14 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
 // than (%alloca - 1), and so is not inbounds, a contradiction.
 bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
       const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
-      LocationSize ObjectAccessSize) {
+      LocationSize MaybeObjectAccessSize) {
   // If the object access size is unknown, or the GEP isn't inbounds, bail.
-  if (ObjectAccessSize == MemoryLocation::UnknownSize || !GEPOp->isInBounds())
+  if (MaybeObjectAccessSize == MemoryLocation::UnknownSize ||
+      !GEPOp->isInBounds())
     return false;
 
+  const uint64_t ObjectAccessSize = MaybeObjectAccessSize;
+
   // We need the object to be an alloca or a globalvariable, and want to know
   // the offset of the pointer from the object precisely, so no variable
   // indices are allowed.
@@ -1418,7 +1424,9 @@ BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
     // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
     // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
     // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
-    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
+    if (AllPositive && GEP1BaseOffset > 0 &&
+        V2Size != MemoryLocation::UnknownSize &&
+        V2Size <= (uint64_t)GEP1BaseOffset)
       return NoAlias;
 
     if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
@@ -1853,13 +1861,16 @@ void BasicAAResult::GetIndexDifference(
 }
 
 bool BasicAAResult::constantOffsetHeuristic(
-    const SmallVectorImpl<VariableGEPIndex> &VarIndices, LocationSize V1Size,
-    LocationSize V2Size, int64_t BaseOffset, AssumptionCache *AC,
-    DominatorTree *DT) {
-  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
-      V2Size == MemoryLocation::UnknownSize)
+    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
+    LocationSize MaybeV1Size, LocationSize MaybeV2Size, int64_t BaseOffset,
+    AssumptionCache *AC, DominatorTree *DT) {
+  if (VarIndices.size() != 2 || MaybeV1Size == MemoryLocation::UnknownSize ||
+      MaybeV2Size == MemoryLocation::UnknownSize)
     return false;
 
+  const uint64_t V1Size = MaybeV1Size;
+  const uint64_t V2Size = MaybeV2Size;
+
   const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];
 
   if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||

lib/Analysis/CFLAndersAliasAnalysis.cpp

@@ -515,10 +515,9 @@ CFLAndersAAResult::FunctionInfo::getAttrs(const Value *V) const {
   return None;
 }
 
-bool CFLAndersAAResult::FunctionInfo::mayAlias(const Value *LHS,
-                                               LocationSize LHSSize,
-                                               const Value *RHS,
-                                               LocationSize RHSSize) const {
+bool CFLAndersAAResult::FunctionInfo::mayAlias(
+    const Value *LHS, LocationSize MaybeLHSSize, const Value *RHS,
+    LocationSize MaybeRHSSize) const {
   assert(LHS && RHS);
 
   // Check if we've seen LHS and RHS before. Sometimes LHS or RHS can be created
@@ -558,10 +557,13 @@ bool CFLAndersAAResult::FunctionInfo::mayAlias(const Value *LHS,
 
   if (RangePair.first != RangePair.second) {
     // Be conservative about UnknownSize
-    if (LHSSize == MemoryLocation::UnknownSize ||
-        RHSSize == MemoryLocation::UnknownSize)
+    if (MaybeLHSSize == MemoryLocation::UnknownSize ||
+        MaybeRHSSize == MemoryLocation::UnknownSize)
       return true;
 
+    const uint64_t LHSSize = MaybeLHSSize;
+    const uint64_t RHSSize = MaybeRHSSize;
+
     for (const auto &OVal : make_range(RangePair)) {
       // Be conservative about UnknownOffset
       if (OVal.Offset == UnknownOffset)

lib/Transforms/Scalar/DeadStoreElimination.cpp

@@ -354,6 +354,9 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
       Earlier.Size == MemoryLocation::UnknownSize)
     return OW_Unknown;
 
+  const uint64_t LaterSize = Later.Size;
+  const uint64_t EarlierSize = Earlier.Size;
+
   const Value *P1 = Earlier.Ptr->stripPointerCasts();
   const Value *P2 = Later.Ptr->stripPointerCasts();
 
@@ -361,7 +364,7 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   // the later store was larger than the earlier store.
   if (P1 == P2 || AA.isMustAlias(P1, P2)) {
     // Make sure that the Later size is >= the Earlier size.
-    if (Later.Size >= Earlier.Size)
+    if (LaterSize >= EarlierSize)
       return OW_Complete;
   }
 
@@ -379,7 +382,7 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   // If the "Later" store is to a recognizable object, get its size.
   uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
   if (ObjectSize != MemoryLocation::UnknownSize)
-    if (ObjectSize == Later.Size && ObjectSize >= Earlier.Size)
+    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
       return OW_Complete;
 
   // Okay, we have stores to two completely different pointers. Try to
@@ -410,8 +413,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   //
   // We have to be careful here as *Off is signed while *.Size is unsigned.
   if (EarlierOff >= LaterOff &&
-      Later.Size >= Earlier.Size &&
-      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
+      LaterSize >= EarlierSize &&
+      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
     return OW_Complete;
 
   // We may now overlap, although the overlap is not complete. There might also
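
(As a quick worked check of the guard above, with illustrative numbers:
take LaterOff = 0, LaterSize = 16, EarlierOff = 4, EarlierSize = 8.
EarlierOff >= LaterOff holds, so the cast uint64_t(EarlierOff - LaterOff)
is safe because the difference is non-negative, and 4 + 8 = 12 <= 16,
i.e. the earlier store [4, 12) lies entirely within the later store
[0, 16) and the overwrite is reported as OW_Complete.)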
@@ -420,21 +423,21 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   // Note: The correctness of this logic depends on the fact that this function
   // is not even called providing DepWrite when there are any intervening reads.
   if (EnablePartialOverwriteTracking &&
-      LaterOff < int64_t(EarlierOff + Earlier.Size) &&
-      int64_t(LaterOff + Later.Size) >= EarlierOff) {
+      LaterOff < int64_t(EarlierOff + EarlierSize) &&
+      int64_t(LaterOff + LaterSize) >= EarlierOff) {
 
     // Insert our part of the overlap into the map.
     auto &IM = IOL[DepWrite];
     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
-                      << ", " << int64_t(EarlierOff + Earlier.Size)
+                      << ", " << int64_t(EarlierOff + EarlierSize)
                       << ") Later [" << LaterOff << ", "
-                      << int64_t(LaterOff + Later.Size) << ")\n");
+                      << int64_t(LaterOff + LaterSize) << ")\n");
 
     // Make sure that we only insert non-overlapping intervals and combine
     // adjacent intervals. The intervals are stored in the map with the ending
     // offset as the key (in the half-open sense) and the starting offset as
     // the value.
-    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + Later.Size;
+    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;
 
     // Find any intervals ending at, or after, LaterIntStart which start
     // before LaterIntEnd.
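
The interval bookkeeping these hunks touch boils down to a merge-on-insert
map keyed by interval end. A minimal standalone sketch of that idea (not
the actual DSE code; insertInterval is a hypothetical helper, and the map
type is assumed to match the "end -> start" scheme the comments describe):

  #include <algorithm>
  #include <cstdint>
  #include <map>

  // Half-open intervals [Start, End), keyed by ending offset.
  using OverlapIntervalsTy = std::map<int64_t, int64_t>; // end -> start

  // Insert [Start, End), merging any existing intervals it overlaps or abuts.
  static void insertInterval(OverlapIntervalsTy &IM, int64_t Start, int64_t End) {
    // Find the first interval ending at or after Start.
    auto ILI = IM.lower_bound(Start);
    // Absorb every interval whose start is at or before End.
    while (ILI != IM.end() && ILI->second <= End) {
      Start = std::min(Start, ILI->second);
      End = std::max(End, ILI->first);
      ILI = IM.erase(ILI); // erase returns the next interval
    }
    IM[End] = Start;
  }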
@@ -464,10 +467,10 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
       ILI = IM.begin();
 
       if (ILI->second <= EarlierOff &&
-          ILI->first >= int64_t(EarlierOff + Earlier.Size)) {
+          ILI->first >= int64_t(EarlierOff + EarlierSize)) {
         LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                           << EarlierOff << ", "
-                          << int64_t(EarlierOff + Earlier.Size)
+                          << int64_t(EarlierOff + EarlierSize)
                           << ") Composite Later [" << ILI->second << ", "
                           << ILI->first << ")\n");
         ++NumCompletePartials;
@@ -478,13 +481,13 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   // Check for an earlier store which writes to all the memory locations that
   // the later store writes to.
   if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
-      int64_t(EarlierOff + Earlier.Size) > LaterOff &&
-      uint64_t(LaterOff - EarlierOff) + Later.Size <= Earlier.Size) {
+      int64_t(EarlierOff + EarlierSize) > LaterOff &&
+      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
     LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
                       << EarlierOff << ", "
-                      << int64_t(EarlierOff + Earlier.Size)
+                      << int64_t(EarlierOff + EarlierSize)
                       << ") by a later store [" << LaterOff << ", "
-                      << int64_t(LaterOff + Later.Size) << ")\n");
+                      << int64_t(LaterOff + LaterSize) << ")\n");
     // TODO: Maybe come up with a better name?
     return OW_PartialEarlierWithFullLater;
   }
@@ -498,8 +501,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   // In this case we may want to trim the size of earlier to avoid generating
   // writes to addresses which will definitely be overwritten later
   if (!EnablePartialOverwriteTracking &&
-      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + Earlier.Size) &&
-       int64_t(LaterOff + Later.Size) >= int64_t(EarlierOff + Earlier.Size)))
+      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
+       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
     return OW_End;
 
   // Finally, we also need to check if the later store overwrites the beginning
@@ -512,9 +515,8 @@ static OverwriteResult isOverwrite(const MemoryLocation &Later,
   // of earlier to avoid generating writes to addresses which will definitely
   // be overwritten later.
   if (!EnablePartialOverwriteTracking &&
-      (LaterOff <= EarlierOff && int64_t(LaterOff + Later.Size) > EarlierOff)) {
-    assert(int64_t(LaterOff + Later.Size) <
-           int64_t(EarlierOff + Earlier.Size) &&
+      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
+    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
            "Expect to be handled as OW_Complete");
     return OW_Begin;
   }