Revert "[Alignment][NFC] Add DebugStr and operator*"

This reverts commit 1e34ab98fc.
This commit is contained in:
Guillaume Chatelet 2020-04-06 07:55:25 +00:00
parent acb0b99c8e
commit 6000478f39
6 changed files with 75 additions and 98 deletions

View File

@ -395,16 +395,6 @@ inline bool operator>(MaybeAlign Lhs, Align Rhs) {
return Lhs && (*Lhs).value() > Rhs.value();
}
// Scales an Align by a positive integer factor. The scaled value must itself
// be a valid alignment (a power of two), which Align's constructor enforces.
inline Align operator*(Align Lhs, uint64_t Rhs) {
  assert(Rhs > 0 && "Rhs must be positive");
  const uint64_t Scaled = Lhs.value() * Rhs;
  return Align(Scaled);
}
// Scales a MaybeAlign by a positive integer factor; an unset alignment stays
// unset (the factor is only applied when Lhs holds a value).
inline MaybeAlign operator*(MaybeAlign Lhs, uint64_t Rhs) {
  assert(Rhs > 0 && "Rhs must be positive");
  if (!Lhs)
    return MaybeAlign();
  return Lhs.getValue() * Rhs;
}
inline Align operator/(Align Lhs, uint64_t Divisor) {
assert(llvm::isPowerOf2_64(Divisor) &&
"Divisor must be positive and a power of 2");
@ -426,19 +416,6 @@ inline Align max(Align Lhs, MaybeAlign Rhs) {
return Rhs && *Rhs > Lhs ? *Rhs : Lhs;
}
#ifndef NDEBUG
// For usage in LLVM_DEBUG macros.
// Renders an Align as "Align(<value>)" for use inside LLVM_DEBUG output.
inline std::string DebugStr(const Align &A) {
  std::string Out = "Align(";
  Out += std::to_string(A.value());
  Out += ")";
  return Out;
}
// For usage in LLVM_DEBUG macros.
// Renders a MaybeAlign for LLVM_DEBUG output: "MaybeAlign(<value>)" when set,
// "MaybeAlign(None)" otherwise.
inline std::string DebugStr(const MaybeAlign &MA) {
  if (!MA)
    return "MaybeAlign(None)";
  return "MaybeAlign(" + std::to_string(MA->value()) + ")";
}
#endif
#undef ALIGN_CHECK_ISPOSITIVE
#undef ALIGN_CHECK_ISSET

View File

@ -41,9 +41,8 @@ static inline Align clampStackAlignment(bool ShouldClamp, Align Alignment,
Align StackAlignment) {
if (!ShouldClamp || Alignment <= StackAlignment)
return Alignment;
LLVM_DEBUG(dbgs() << "Warning: requested alignment " << DebugStr(Alignment)
<< " exceeds the stack alignment "
<< DebugStr(StackAlignment)
LLVM_DEBUG(dbgs() << "Warning: requested alignment " << Alignment.value()
<< " exceeds the stack alignment " << StackAlignment.value()
<< " when stack realignment is off" << '\n');
return StackAlignment;
}

View File

@ -9498,8 +9498,8 @@ static void tryToElideArgumentCopy(
if (MFI.getObjectAlign(FixedIndex) < RequiredAlignment) {
LLVM_DEBUG(dbgs() << " argument copy elision failed: alignment of alloca "
"greater than stack argument alignment ("
<< DebugStr(RequiredAlignment) << " vs "
<< DebugStr(MFI.getObjectAlign(FixedIndex)) << ")\n");
<< RequiredAlignment.value() << " vs "
<< MFI.getObjectAlign(FixedIndex).value() << ")\n");
return;
}

View File

@ -266,7 +266,7 @@ eliminateFrameIndex(MachineBasicBlock::iterator II, int SPAdj,
<< "spOffset : " << spOffset << "\n"
<< "stackSize : " << stackSize << "\n"
<< "alignment : "
<< DebugStr(MF.getFrameInfo().getObjectAlign(FrameIndex))
<< MF.getFrameInfo().getObjectAlign(FrameIndex).value()
<< "\n");
eliminateFI(MI, FIOperandNum, FrameIndex, stackSize, spOffset);

View File

@ -297,7 +297,9 @@ class DataFlowSanitizer : public ModulePass {
friend struct DFSanFunction;
friend class DFSanVisitor;
enum { ShadowWidthBits = 16, ShadowWidthBytes = ShadowWidthBits / 8 };
enum {
ShadowWidth = 16
};
/// Which ABI should be used for instrumented functions?
enum InstrumentedABI {
@ -575,11 +577,11 @@ bool DataFlowSanitizer::doInitialization(Module &M) {
Mod = &M;
Ctx = &M.getContext();
ShadowTy = IntegerType::get(*Ctx, ShadowWidthBits);
ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
ShadowPtrTy = PointerType::getUnqual(ShadowTy);
IntptrTy = DL.getIntPtrType(*Ctx);
ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidthBytes);
ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
if (IsX86_64)
ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
else if (IsMIPS64)
@ -1236,7 +1238,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
}
}
const MaybeAlign ShadowAlign(Align * DFS.ShadowWidthBytes);
const MaybeAlign ShadowAlign(Align * DFS.ShadowWidth / 8);
SmallVector<const Value *, 2> Objs;
GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
bool AllConstants = true;
@ -1270,7 +1272,7 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
IRB.CreateAlignedLoad(DFS.ShadowTy, ShadowAddr1, ShadowAlign), Pos);
}
}
if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidthBits) == 0) {
if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
// Fast path for the common case where each byte has identical shadow: load
// shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
// shadow is non-equal.
@ -1282,15 +1284,15 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
FallbackCall->addAttribute(AttributeList::ReturnIndex, Attribute::ZExt);
// Compare each of the shadows stored in the loaded 64 bits to each other,
// by computing (WideShadow rotl ShadowWidthBits) == WideShadow.
// by computing (WideShadow rotl ShadowWidth) == WideShadow.
IRBuilder<> IRB(Pos);
Value *WideAddr =
IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
Value *WideShadow =
IRB.CreateAlignedLoad(IRB.getInt64Ty(), WideAddr, ShadowAlign);
Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidthBits);
Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidthBits);
Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
@ -1313,8 +1315,8 @@ Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
ReplaceInstWithInst(Head->getTerminator(), LastBr);
DT.addNewBlock(FallbackBB, Head);
for (uint64_t Ofs = 64 / DFS.ShadowWidthBits; Ofs != Size;
Ofs += 64 / DFS.ShadowWidthBits) {
for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
Ofs += 64 / DFS.ShadowWidth) {
BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
DT.addNewBlock(NextBB, LastBr->getParent());
IRBuilder<> NextIRB(NextBB);
@ -1384,12 +1386,11 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
}
}
const Align ShadowAlign(Alignment.value() * DFS.ShadowWidthBytes);
const Align ShadowAlign(Alignment.value() * (DFS.ShadowWidth / 8));
IRBuilder<> IRB(Pos);
Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
if (Shadow == DFS.ZeroShadow) {
IntegerType *ShadowTy =
IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidthBits);
IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
Value *ExtShadowAddr =
IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
@ -1397,7 +1398,7 @@ void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, Align Alignment,
return;
}
const unsigned ShadowVecSize = 128 / DFS.ShadowWidthBits;
const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
uint64_t Offset = 0;
if (Size >= ShadowVecSize) {
VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
@ -1547,9 +1548,9 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
IRBuilder<> IRB(&I);
Value *RawDestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
Value *LenShadow =
IRB.CreateMul(I.getLength(), ConstantInt::get(I.getLength()->getType(),
DFSF.DFS.ShadowWidthBytes));
Value *LenShadow = IRB.CreateMul(
I.getLength(),
ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
Value *DestShadow = IRB.CreateBitCast(RawDestShadow, Int8Ptr);
SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
@ -1557,11 +1558,11 @@ void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
{DestShadow, SrcShadow, LenShadow, I.getVolatileCst()}));
if (ClPreserveAlignment) {
MTI->setDestAlignment(I.getDestAlign() * DFSF.DFS.ShadowWidthBytes);
MTI->setSourceAlignment(I.getSourceAlign() * DFSF.DFS.ShadowWidthBytes);
MTI->setDestAlignment(I.getDestAlignment() * (DFSF.DFS.ShadowWidth / 8));
MTI->setSourceAlignment(I.getSourceAlignment() * (DFSF.DFS.ShadowWidth / 8));
} else {
MTI->setDestAlignment(Align(DFSF.DFS.ShadowWidthBytes));
MTI->setSourceAlignment(Align(DFSF.DFS.ShadowWidthBytes));
MTI->setDestAlignment(DFSF.DFS.ShadowWidth / 8);
MTI->setSourceAlignment(DFSF.DFS.ShadowWidth / 8);
}
if (ClEventCallbacks) {
IRB.CreateCall(DFSF.DFS.DFSanMemTransferCallbackFn,

View File

@ -90,9 +90,9 @@ FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
// to a constant. Using SCEV to compute alignment handles the case where
// DiffSCEV is a recurrence with constant start such that the aligned offset
// is constant. e.g. {16,+,32} % 32 -> 16.
static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
const SCEV *AlignSCEV,
ScalarEvolution *SE) {
static unsigned getNewAlignmentDiff(const SCEV *DiffSCEV,
const SCEV *AlignSCEV,
ScalarEvolution *SE) {
// DiffUnits = Diff % int64_t(Alignment)
const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);
@ -107,24 +107,25 @@ static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
// displaced pointer has the same alignment as the aligned pointer, so
// return the alignment value.
if (!DiffUnits)
return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();
return (unsigned)
cast<SCEVConstant>(AlignSCEV)->getValue()->getSExtValue();
// If the displacement is not an exact multiple, but the remainder is a
// constant, then return this remainder (but only if it is a power of 2).
uint64_t DiffUnitsAbs = std::abs(DiffUnits);
if (isPowerOf2_64(DiffUnitsAbs))
return Align(DiffUnitsAbs);
return (unsigned) DiffUnitsAbs;
}
return None;
return 0;
}
// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr.
static MaybeAlign getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
const SCEV *OffSCEV, Value *Ptr,
ScalarEvolution *SE) {
static unsigned getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
const SCEV *OffSCEV, Value *Ptr,
ScalarEvolution *SE) {
const SCEV *PtrSCEV = SE->getSCEV(Ptr);
// On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
// (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
@ -145,12 +146,13 @@ static MaybeAlign getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
<< *AlignSCEV << " and offset " << *OffSCEV
<< " using diff " << *DiffSCEV << "\n");
if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
return *NewAlignment;
}
unsigned NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE);
LLVM_DEBUG(dbgs() << "\tnew alignment: " << NewAlignment << "\n");
if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
if (NewAlignment) {
return NewAlignment;
} else if (const SCEVAddRecExpr *DiffARSCEV =
dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
// The relative offset to the alignment assumption did not yield a constant,
// but we should try harder: if we assume that a is 32-byte aligned, then in
// for (i = 0; i < 1024; i += 4) r += a[i]; not all of the loads from a are
@ -168,34 +170,34 @@ static MaybeAlign getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
// first iteration, and also the alignment using the per-iteration delta.
// If these are the same, then use that answer. Otherwise, use the smaller
// one, but only if it divides the larger one.
MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
MaybeAlign NewIncAlignment =
getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);
NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
unsigned NewIncAlignment = getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);
LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
<< "\n");
LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
<< "\n");
LLVM_DEBUG(dbgs() << "\tnew start alignment: " << NewAlignment << "\n");
LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << NewIncAlignment << "\n");
// Both None or set to the same value.
if (NewAlignment == NewIncAlignment) {
LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
<< DebugStr(NewAlignment) << "\n");
return NewAlignment;
}
if (NewAlignment > NewIncAlignment) {
LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
<< DebugStr(NewIncAlignment) << "\n");
return NewIncAlignment;
}
if (NewIncAlignment > NewAlignment) {
LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
<< DebugStr(NewAlignment) << "\n");
if (!NewAlignment || !NewIncAlignment) {
return 0;
} else if (NewAlignment > NewIncAlignment) {
if (NewAlignment % NewIncAlignment == 0) {
LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewIncAlignment
<< "\n");
return NewIncAlignment;
}
} else if (NewIncAlignment > NewAlignment) {
if (NewIncAlignment % NewAlignment == 0) {
LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
<< "\n");
return NewAlignment;
}
} else if (NewIncAlignment == NewAlignment) {
LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << NewAlignment
<< "\n");
return NewAlignment;
}
}
return None;
return 0;
}
bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
@ -321,27 +323,26 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
Instruction *J = WorkList.pop_back_val();
if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
MaybeAlign NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
LI->getPointerOperand(), SE);
unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
LI->getPointerOperand(), SE);
if (NewAlignment > LI->getAlignment()) {
LI->setAlignment(MaybeAlign(NewAlignment));
++NumLoadAlignChanged;
}
} else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
MaybeAlign NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
SI->getPointerOperand(), SE);
unsigned NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
SI->getPointerOperand(), SE);
if (NewAlignment > SI->getAlignment()) {
SI->setAlignment(MaybeAlign(NewAlignment));
++NumStoreAlignChanged;
}
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
MaybeAlign NewDestAlignment =
getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);
unsigned NewDestAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
MI->getDest(), SE);
LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
<< "\n";);
LLVM_DEBUG(dbgs() << "\tmem inst: " << NewDestAlignment << "\n";);
if (NewDestAlignment > MI->getDestAlignment()) {
MI->setDestAlignment(NewDestAlignment);
++NumMemIntAlignChanged;
@ -350,11 +351,10 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {
// For memory transfers, there is also a source alignment that
// can be set.
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
MaybeAlign NewSrcAlignment =
getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);
unsigned NewSrcAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
MTI->getSource(), SE);
LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
<< "\n";);
LLVM_DEBUG(dbgs() << "\tmem trans: " << NewSrcAlignment << "\n";);
if (NewSrcAlignment > MTI->getSourceAlignment()) {
MTI->setSourceAlignment(NewSrcAlignment);