[Alignment][NFC] Use proper getter to retrieve alignment from ConstantInt and ConstantSDNode
This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Differential Revision: https://reviews.llvm.org/D83082
parent 47cb8a0f0b
commit 87e2751cf0
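Every hunk below applies the same migration: call sites that read an alignment operand with getZExtValue() and re-wrapped it by hand now use the getters added to ConstantInt and ConstantSDNode. A minimal sketch of the pattern (the helper function is hypothetical and not part of the patch; ConstantInt, Align, MaybeAlign, and getValueOr are the real APIs the hunks use):

    // Sketch only; alignFromIntrinsicArg is a hypothetical helper.
    #include "llvm/IR/Constants.h"
    #include "llvm/Support/Alignment.h"
    using namespace llvm;

    static Align alignFromIntrinsicArg(ConstantInt *CI, Align Fallback) {
      // Old pattern, repeated at many call sites:
      //   unsigned A = CI->getZExtValue();
      //   MaybeAlign MA(A); // hand-rolled re-wrapping, 0 meaning "unspecified"
      // New pattern, a single call on the constant itself:
      MaybeAlign MA = CI->getMaybeAlignValue(); // None when the operand is 0
      return MA.getValueOr(Fallback); // mirrors the DAG.getEVTAlign(VT) uses below
    }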
@@ -1576,6 +1576,8 @@ public:
   uint64_t getLimitedValue(uint64_t Limit = UINT64_MAX) {
     return Value->getLimitedValue(Limit);
   }
+  MaybeAlign getMaybeAlignValue() const { return Value->getMaybeAlignValue(); }
+  Align getAlignValue() const { return Value->getAlignValue(); }
 
   bool isOne() const { return Value->isOne(); }
   bool isNullValue() const { return Value->isZero(); }
@@ -151,9 +151,19 @@ public:
     return Val.getSExtValue();
   }
 
-  /// Return the constant as an llvm::Align. Note that this method can assert if
-  /// the value does not fit in 64 bits or is not a power of two.
-  inline Align getAlignValue() const { return Align(getZExtValue()); }
+  /// Return the constant as an llvm::MaybeAlign.
+  /// Note that this method can assert if the value does not fit in 64 bits or
+  /// is not a power of two.
+  inline MaybeAlign getMaybeAlignValue() const {
+    return MaybeAlign(getZExtValue());
+  }
+
+  /// Return the constant as an llvm::Align, interpreting `0` as `Align(1)`.
+  /// Note that this method can assert if the value does not fit in 64 bits or
+  /// is not a power of two.
+  inline Align getAlignValue() const {
+    return getMaybeAlignValue().valueOrOne();
+  }
 
   /// A helper method that can be used to determine if the constant contained
   /// within is equal to a constant. This only works for very small values,
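An aside on the two getters just introduced (the values are hypothetical; the behavior follows the doc comments above):

    // Suppose ConstantInt *C0 holds 0 and ConstantInt *C8 holds 8.
    // C0->getMaybeAlignValue() -> None      (0 means "alignment unspecified")
    // C8->getMaybeAlignValue() -> Align(8)
    // C0->getAlignValue()      -> Align(1)  (valueOrOne() of None)
    // C8->getAlignValue()      -> Align(8)
    // A non-power-of-two or >64-bit value asserts, per the comments above.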
@@ -25,12 +25,12 @@ public:
   bool IsWrite;
   Type *OpType;
   uint64_t TypeSize;
-  unsigned Alignment;
+  MaybeAlign Alignment;
   // The mask Value, if we're looking at a masked load/store.
   Value *MaybeMask;
 
   InterestingMemoryOperand(Instruction *I, unsigned OperandNo, bool IsWrite,
-                           class Type *OpType, unsigned Alignment,
+                           class Type *OpType, MaybeAlign Alignment,
                            Value *MaybeMask = nullptr)
       : IsWrite(IsWrite), OpType(OpType), Alignment(Alignment),
         MaybeMask(MaybeMask) {
@@ -403,7 +403,7 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
   Instruction *InsertPt = CI;
   BasicBlock *IfBlock = CI->getParent();
   Builder.SetInsertPoint(InsertPt);
-  unsigned AlignVal = cast<ConstantInt>(Alignment)->getZExtValue();
+  MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
 
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 

@@ -417,8 +417,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
     if (cast<Constant>(Mask)->getAggregateElement(Idx)->isNullValue())
       continue;
     Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-    LoadInst *Load = Builder.CreateAlignedLoad(
-        EltTy, Ptr, MaybeAlign(AlignVal), "Load" + Twine(Idx));
+    LoadInst *Load =
+        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
     VResult =
         Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
   }

@@ -462,8 +462,8 @@ static void scalarizeMaskedGather(CallInst *CI, bool &ModifiedDT) {
     Builder.SetInsertPoint(InsertPt);
 
     Value *Ptr = Builder.CreateExtractElement(Ptrs, Idx, "Ptr" + Twine(Idx));
-    LoadInst *Load = Builder.CreateAlignedLoad(EltTy, Ptr, MaybeAlign(AlignVal),
-                                               "Load" + Twine(Idx));
+    LoadInst *Load =
+        Builder.CreateAlignedLoad(EltTy, Ptr, AlignVal, "Load" + Twine(Idx));
     Value *NewVResult =
         Builder.CreateInsertElement(VResult, Load, Idx, "Res" + Twine(Idx));
 

@@ -533,7 +533,7 @@ static void scalarizeMaskedScatter(CallInst *CI, bool &ModifiedDT) {
   Builder.SetInsertPoint(InsertPt);
   Builder.SetCurrentDebugLocation(CI->getDebugLoc());
 
-  MaybeAlign AlignVal(cast<ConstantInt>(Alignment)->getZExtValue());
+  MaybeAlign AlignVal = cast<ConstantInt>(Alignment)->getMaybeAlignValue();
   unsigned VectorWidth = cast<VectorType>(Src->getType())->getNumElements();
 
   // Shorten the way if the mask is a vector of constants.
@@ -1599,17 +1599,17 @@ void SelectionDAGLegalize::ExpandDYNAMIC_STACKALLOC(SDNode* Node,
   SDValue Size = Tmp2.getOperand(1);
   SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
   Chain = SP.getValue(1);
-  unsigned Align = cast<ConstantSDNode>(Tmp3)->getZExtValue();
+  Align Alignment = cast<ConstantSDNode>(Tmp3)->getAlignValue();
   const TargetFrameLowering *TFL = DAG.getSubtarget().getFrameLowering();
   unsigned Opc =
     TFL->getStackGrowthDirection() == TargetFrameLowering::StackGrowsUp ?
     ISD::ADD : ISD::SUB;
 
-  unsigned StackAlign = TFL->getStackAlignment();
+  Align StackAlign = TFL->getStackAlign();
   Tmp1 = DAG.getNode(Opc, dl, VT, SP, Size); // Value
-  if (Align > StackAlign)
+  if (Alignment > StackAlign)
     Tmp1 = DAG.getNode(ISD::AND, dl, VT, Tmp1,
-                       DAG.getConstant(-(uint64_t)Align, dl, VT));
+                       DAG.getConstant(-Alignment.value(), dl, VT));
   Chain = DAG.getCopyToReg(Chain, dl, SPReg, Tmp1); // Output chain
 
   Tmp2 = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
@@ -4214,8 +4214,7 @@ void SelectionDAGBuilder::visitMaskedStore(const CallInst &I,
     // llvm.masked.store.*(Src0, Ptr, alignment, Mask)
     Src0 = I.getArgOperand(0);
     Ptr = I.getArgOperand(1);
-    Alignment =
-        MaybeAlign(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
+    Alignment = cast<ConstantInt>(I.getArgOperand(2))->getMaybeAlignValue();
     Mask = I.getArgOperand(3);
   };
   auto getCompressingStoreOps = [&](Value *&Ptr, Value *&Mask, Value *&Src0,

@@ -4329,9 +4328,9 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
   SDValue Src0 = getValue(I.getArgOperand(0));
   SDValue Mask = getValue(I.getArgOperand(3));
   EVT VT = Src0.getValueType();
-  MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
-  if (!Alignment)
-    Alignment = DAG.getEVTAlign(VT);
+  Align Alignment = cast<ConstantInt>(I.getArgOperand(2))
+                        ->getMaybeAlignValue()
+                        .getValueOr(DAG.getEVTAlign(VT));
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
 
   AAMDNodes AAInfo;

@@ -4349,7 +4348,7 @@ void SelectionDAGBuilder::visitMaskedScatter(const CallInst &I) {
       MachinePointerInfo(AS), MachineMemOperand::MOStore,
       // TODO: Make MachineMemOperands aware of scalable
       // vectors.
-      MemoryLocation::UnknownSize, *Alignment, AAInfo);
+      MemoryLocation::UnknownSize, Alignment, AAInfo);
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
     Index = getValue(Ptr);

@@ -4370,8 +4369,7 @@ void SelectionDAGBuilder::visitMaskedLoad(const CallInst &I, bool IsExpanding) {
                            MaybeAlign &Alignment) {
     // @llvm.masked.load.*(Ptr, alignment, Mask, Src0)
     Ptr = I.getArgOperand(0);
-    Alignment =
-        MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
+    Alignment = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
     Mask = I.getArgOperand(2);
     Src0 = I.getArgOperand(3);
   };

@@ -4440,9 +4438,9 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
 
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
-  MaybeAlign Alignment(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
-  if (!Alignment)
-    Alignment = DAG.getEVTAlign(VT);
+  Align Alignment = cast<ConstantInt>(I.getArgOperand(1))
+                        ->getMaybeAlignValue()
+                        .getValueOr(DAG.getEVTAlign(VT));
 
   AAMDNodes AAInfo;
   I.getAAMetadata(AAInfo);

@@ -4460,7 +4458,7 @@ void SelectionDAGBuilder::visitMaskedGather(const CallInst &I) {
       MachinePointerInfo(AS), MachineMemOperand::MOLoad,
       // TODO: Make MachineMemOperands aware of scalable
       // vectors.
-      MemoryLocation::UnknownSize, *Alignment, AAInfo, Ranges);
+      MemoryLocation::UnknownSize, Alignment, AAInfo, Ranges);
 
   if (!UniformBase) {
     Base = DAG.getConstant(0, sdl, TLI.getPointerTy(DAG.getDataLayout()));
@@ -3757,10 +3757,10 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
     auto *MemCI = cast<MemIntrinsic>(NewCall);
     // All mem intrinsics support dest alignment.
     const ConstantInt *Align = cast<ConstantInt>(CI->getArgOperand(3));
-    MemCI->setDestAlignment(Align->getZExtValue());
+    MemCI->setDestAlignment(Align->getMaybeAlignValue());
     // Memcpy/Memmove also support source alignment.
     if (auto *MTI = dyn_cast<MemTransferInst>(MemCI))
-      MTI->setSourceAlignment(Align->getZExtValue());
+      MTI->setSourceAlignment(Align->getMaybeAlignValue());
     break;
   }
 }
@@ -9109,7 +9109,8 @@ AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
   SDNode *Node = Op.getNode();
   SDValue Chain = Op.getOperand(0);
   SDValue Size = Op.getOperand(1);
-  MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+  MaybeAlign Align =
+      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
   EVT VT = Node->getValueType(0);
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
@@ -590,8 +590,8 @@ bool AMDGPULibCalls::fold_read_write_pipe(CallInst *CI, IRBuilder<> &B,
   if (!isa<ConstantInt>(PacketSize) || !isa<ConstantInt>(PacketAlign))
     return false;
   unsigned Size = cast<ConstantInt>(PacketSize)->getZExtValue();
-  unsigned Align = cast<ConstantInt>(PacketAlign)->getZExtValue();
-  if (Size != Align || !isPowerOf2_32(Size))
+  Align Alignment = cast<ConstantInt>(PacketAlign)->getAlignValue();
+  if (Alignment != Size)
     return false;
 
   Type *PtrElemTy;
@@ -3127,7 +3127,7 @@ SDValue SITargetLowering::lowerDYNAMIC_STACKALLOCImpl(
   SDValue Size = Tmp2.getOperand(1);
   SDValue SP = DAG.getCopyFromReg(Chain, dl, SPReg, VT);
   Chain = SP.getValue(1);
-  MaybeAlign Alignment(cast<ConstantSDNode>(Tmp3)->getZExtValue());
+  MaybeAlign Alignment = cast<ConstantSDNode>(Tmp3)->getMaybeAlignValue();
   const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
   const TargetFrameLowering *TFL = ST.getFrameLowering();
   unsigned Opc =
@@ -17758,7 +17758,8 @@ ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
           "no-stack-arg-probe")) {
-    MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+    MaybeAlign Align =
+        cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
     Chain = SP.getValue(1);
     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);

@@ -17969,7 +17970,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
-    Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
+    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
     // volatile loads with NEON intrinsics not supported
     Info.flags = MachineMemOperand::MOLoad;
     return true;

@@ -18010,7 +18011,7 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
-    Info.align = MaybeAlign(cast<ConstantInt>(AlignArg)->getZExtValue());
+    Info.align = cast<ConstantInt>(AlignArg)->getMaybeAlignValue();
     // volatile stores with NEON intrinsics not supported
     Info.flags = MachineMemOperand::MOStore;
     return true;
@@ -78,7 +78,7 @@ private:
 
   // Check this is a valid gather with correct alignment
   bool isLegalTypeAndAlignment(unsigned NumElements, unsigned ElemSize,
-                               unsigned Alignment);
+                               Align Alignment);
   // Check whether Ptr is hidden behind a bitcast and look through it
   void lookThroughBitcast(Value *&Ptr);
   // Check for a getelementptr and deduce base and offsets from it, on success

@@ -155,12 +155,12 @@ Pass *llvm::createMVEGatherScatterLoweringPass() {
 
 bool MVEGatherScatterLowering::isLegalTypeAndAlignment(unsigned NumElements,
                                                        unsigned ElemSize,
-                                                       unsigned Alignment) {
+                                                       Align Alignment) {
   if (((NumElements == 4 &&
         (ElemSize == 32 || ElemSize == 16 || ElemSize == 8)) ||
        (NumElements == 8 && (ElemSize == 16 || ElemSize == 8)) ||
        (NumElements == 16 && ElemSize == 8)) &&
-      ElemSize / 8 <= Alignment)
+      Alignment >= ElemSize / 8)
     return true;
   LLVM_DEBUG(dbgs() << "masked gathers/scatters: instruction does not have "
                     << "valid alignment or vector type \n");

@@ -306,7 +306,7 @@ Value *MVEGatherScatterLowering::lowerGather(IntrinsicInst *I) {
   // Potentially optimising the addressing modes as we do so.
   auto *Ty = cast<FixedVectorType>(I->getType());
   Value *Ptr = I->getArgOperand(0);
-  unsigned Alignment = cast<ConstantInt>(I->getArgOperand(1))->getZExtValue();
+  Align Alignment = cast<ConstantInt>(I->getArgOperand(1))->getAlignValue();
   Value *Mask = I->getArgOperand(2);
   Value *PassThru = I->getArgOperand(3);
 

@@ -466,7 +466,7 @@ Value *MVEGatherScatterLowering::lowerScatter(IntrinsicInst *I) {
   // Potentially optimising the addressing modes as we do so.
   Value *Input = I->getArgOperand(0);
   Value *Ptr = I->getArgOperand(1);
-  unsigned Alignment = cast<ConstantInt>(I->getArgOperand(2))->getZExtValue();
+  Align Alignment = cast<ConstantInt>(I->getArgOperand(2))->getAlignValue();
   auto *Ty = cast<FixedVectorType>(Input->getType());
 
   if (!isLegalTypeAndAlignment(Ty->getNumElements(), Ty->getScalarSizeInBits(),
@@ -1315,28 +1315,28 @@ bool HexagonDAGToDAGISel::SelectAddrFI(SDValue &N, SDValue &R) {
 }
 
 inline bool HexagonDAGToDAGISel::SelectAddrGA(SDValue &N, SDValue &R) {
-  return SelectGlobalAddress(N, R, false, 0);
+  return SelectGlobalAddress(N, R, false, Align(1));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAddrGP(SDValue &N, SDValue &R) {
-  return SelectGlobalAddress(N, R, true, 0);
+  return SelectGlobalAddress(N, R, true, Align(1));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAnyImm(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 0);
+  return SelectAnyImmediate(N, R, Align(1));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAnyImm0(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 0);
+  return SelectAnyImmediate(N, R, Align(1));
 }
 inline bool HexagonDAGToDAGISel::SelectAnyImm1(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 1);
+  return SelectAnyImmediate(N, R, Align(2));
 }
 inline bool HexagonDAGToDAGISel::SelectAnyImm2(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 2);
+  return SelectAnyImmediate(N, R, Align(4));
 }
 inline bool HexagonDAGToDAGISel::SelectAnyImm3(SDValue &N, SDValue &R) {
-  return SelectAnyImmediate(N, R, 3);
+  return SelectAnyImmediate(N, R, Align(8));
 }
 
 inline bool HexagonDAGToDAGISel::SelectAnyInt(SDValue &N, SDValue &R) {

@@ -1348,17 +1348,13 @@ inline bool HexagonDAGToDAGISel::SelectAnyInt(SDValue &N, SDValue &R) {
 }
 
 bool HexagonDAGToDAGISel::SelectAnyImmediate(SDValue &N, SDValue &R,
-                                             uint32_t LogAlign) {
-  auto IsAligned = [LogAlign] (uint64_t V) -> bool {
-    return alignTo(V, (uint64_t)1 << LogAlign) == V;
-  };
-
+                                             Align Alignment) {
   switch (N.getOpcode()) {
   case ISD::Constant: {
     if (N.getValueType() != MVT::i32)
       return false;
     int32_t V = cast<const ConstantSDNode>(N)->getZExtValue();
-    if (!IsAligned(V))
+    if (!isAligned(Alignment, V))
       return false;
     R = CurDAG->getTargetConstant(V, SDLoc(N), N.getValueType());
     return true;

@@ -1366,37 +1362,34 @@ bool HexagonDAGToDAGISel::SelectAnyImmediate(SDValue &N, SDValue &R,
   case HexagonISD::JT:
   case HexagonISD::CP:
     // These are assumed to always be aligned at least 8-byte boundary.
-    if (LogAlign > 3)
+    if (Alignment > Align(8))
       return false;
     R = N.getOperand(0);
     return true;
   case ISD::ExternalSymbol:
     // Symbols may be aligned at any boundary.
-    if (LogAlign > 0)
+    if (Alignment > Align(1))
      return false;
     R = N;
     return true;
   case ISD::BlockAddress:
     // Block address is always aligned at least 4-byte boundary.
-    if (LogAlign > 2 || !IsAligned(cast<BlockAddressSDNode>(N)->getOffset()))
+    if (Alignment > Align(4) ||
+        !isAligned(Alignment, cast<BlockAddressSDNode>(N)->getOffset()))
       return false;
     R = N;
     return true;
   }
 
-  if (SelectGlobalAddress(N, R, false, LogAlign) ||
-      SelectGlobalAddress(N, R, true, LogAlign))
+  if (SelectGlobalAddress(N, R, false, Alignment) ||
+      SelectGlobalAddress(N, R, true, Alignment))
     return true;
 
   return false;
 }
 
 bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
-                                              bool UseGP, uint32_t LogAlign) {
-  auto IsAligned = [LogAlign] (uint64_t V) -> bool {
-    return alignTo(V, (uint64_t)1 << LogAlign) == V;
-  };
-
+                                              bool UseGP, Align Alignment) {
   switch (N.getOpcode()) {
   case ISD::ADD: {
     SDValue N0 = N.getOperand(0);

@@ -1407,10 +1400,9 @@ bool HexagonDAGToDAGISel::SelectGlobalAddress(SDValue &N, SDValue &R,
     if (!UseGP && GAOpc != HexagonISD::CONST32)
       return false;
     if (ConstantSDNode *Const = dyn_cast<ConstantSDNode>(N1)) {
-      SDValue Addr = N0.getOperand(0);
       // For the purpose of alignment, sextvalue and zextvalue are the same.
-      if (!IsAligned(Const->getZExtValue()))
+      if (!isAligned(Alignment, Const->getZExtValue()))
         return false;
+      SDValue Addr = N0.getOperand(0);
       if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Addr)) {
         if (GA->getOpcode() == ISD::TargetGlobalAddress) {
           uint64_t NewOff = GA->getOffset() + (uint64_t)Const->getSExtValue();

@@ -59,9 +59,8 @@ public:
   inline bool SelectAddrGP(SDValue &N, SDValue &R);
   inline bool SelectAnyImm(SDValue &N, SDValue &R);
   inline bool SelectAnyInt(SDValue &N, SDValue &R);
-  bool SelectAnyImmediate(SDValue &N, SDValue &R, uint32_t LogAlign);
-  bool SelectGlobalAddress(SDValue &N, SDValue &R, bool UseGP,
-                           uint32_t LogAlign);
+  bool SelectAnyImmediate(SDValue &N, SDValue &R, Align Alignment);
+  bool SelectGlobalAddress(SDValue &N, SDValue &R, bool UseGP, Align Alignment);
   bool SelectAddrFI(SDValue &N, SDValue &R);
   bool DetectUseSxtw(SDValue &N, SDValue &R);
 
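A reading aid for the Hexagon hunks above (not from the patch itself): the log2-encoded alignment maps onto the Align type as

    // LogAlign 0 -> Align(1), 1 -> Align(2), 2 -> Align(4), 3 -> Align(8)
    // and the hand-rolled check
    //   alignTo(V, (uint64_t)1 << LogAlign) == V
    // becomes the Alignment.h predicate
    //   isAligned(Alignment, V)
    // where Align is a power of two by construction.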
@@ -3780,8 +3780,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic(
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Info.flags = MachineMemOperand::MOLoad;
-    Info.align =
-        MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
+    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
 
     return true;
   }

@@ -3800,8 +3799,7 @@ bool NVPTXTargetLowering::getTgtMemIntrinsic(
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
     Info.flags = MachineMemOperand::MOLoad;
-    Info.align =
-        MaybeAlign(cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
+    Info.align = cast<ConstantInt>(I.getArgOperand(1))->getMaybeAlignValue();
 
     return true;
   }
@@ -240,7 +240,7 @@ namespace {
     /// bit signed displacement.
     /// Returns false if it can be represented by [r+imm], which are preferred.
     bool SelectAddrIdx(SDValue N, SDValue &Base, SDValue &Index) {
-      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, 0);
+      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, None);
     }
 
     /// SelectAddrIdx4 - Given the specified address, check to see if it can be

@@ -250,7 +250,8 @@ namespace {
     /// displacement must be a multiple of 4.
     /// Returns false if it can be represented by [r+imm], which are preferred.
     bool SelectAddrIdxX4(SDValue N, SDValue &Base, SDValue &Index) {
-      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, 4);
+      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
+                                              Align(4));
     }
 
     /// SelectAddrIdx16 - Given the specified address, check to see if it can be

@@ -260,7 +261,8 @@ namespace {
     /// displacement must be a multiple of 16.
     /// Returns false if it can be represented by [r+imm], which are preferred.
     bool SelectAddrIdxX16(SDValue N, SDValue &Base, SDValue &Index) {
-      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG, 16);
+      return PPCLowering->SelectAddressRegReg(N, Base, Index, *CurDAG,
+                                              Align(16));
     }
 
     /// SelectAddrIdxOnly - Given the specified address, force it to be

@@ -275,21 +277,22 @@ namespace {
     /// displacement.
     bool SelectAddrImm(SDValue N, SDValue &Disp,
                        SDValue &Base) {
-      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 0);
+      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, None);
     }
 
     /// SelectAddrImmX4 - Returns true if the address N can be represented by
     /// a base register plus a signed 16-bit displacement that is a multiple of
     /// 4 (last parameter). Suitable for use by STD and friends.
     bool SelectAddrImmX4(SDValue N, SDValue &Disp, SDValue &Base) {
-      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 4);
+      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, Align(4));
     }
 
     /// SelectAddrImmX16 - Returns true if the address N can be represented by
     /// a base register plus a signed 16-bit displacement that is a multiple of
     /// 16(last parameter). Suitable for use by STXV and friends.
     bool SelectAddrImmX16(SDValue N, SDValue &Disp, SDValue &Base) {
-      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG, 16);
+      return PPCLowering->SelectAddressRegImm(N, Disp, Base, *CurDAG,
+                                              Align(16));
     }
 
     // Select an address into a single register.
@@ -2438,22 +2438,22 @@ bool PPCTargetLowering::SelectAddressEVXRegReg(SDValue N, SDValue &Base,
 /// non-zero and N can be represented by a base register plus a signed 16-bit
 /// displacement, make a more precise judgement by checking (displacement % \p
 /// EncodingAlignment).
-bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
-                                            SDValue &Index, SelectionDAG &DAG,
-                                            unsigned EncodingAlignment) const {
+bool PPCTargetLowering::SelectAddressRegReg(
+    SDValue N, SDValue &Base, SDValue &Index, SelectionDAG &DAG,
+    MaybeAlign EncodingAlignment) const {
   // If we have a PC Relative target flag don't select as [reg+reg]. It will be
   // a [pc+imm].
   if (SelectAddressPCRel(N, Base))
     return false;
 
-  int16_t imm = 0;
+  int16_t Imm = 0;
   if (N.getOpcode() == ISD::ADD) {
     // Is there any SPE load/store (f64), which can't handle 16bit offset?
     // SPE load/store can only handle 8-bit offsets.
     if (hasSPE() && SelectAddressEVXRegReg(N, Base, Index, DAG))
       return true;
-    if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || !(imm % EncodingAlignment)))
+    if (isIntS16Immediate(N.getOperand(1), Imm) &&
+        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
       return false; // r+i
     if (N.getOperand(1).getOpcode() == PPCISD::Lo)
       return false; // r+i

@@ -2462,8 +2462,8 @@ bool PPCTargetLowering::SelectAddressRegReg(SDValue N, SDValue &Base,
     Index = N.getOperand(1);
     return true;
   } else if (N.getOpcode() == ISD::OR) {
-    if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || !(imm % EncodingAlignment)))
+    if (isIntS16Immediate(N.getOperand(1), Imm) &&
+        (!EncodingAlignment || isAligned(*EncodingAlignment, Imm)))
       return false; // r+i can fold it if we can.
 
     // If this is an or of disjoint bitfields, we can codegen this as an add

@@ -2529,10 +2529,9 @@ static void fixupFuncForFI(SelectionDAG &DAG, int FrameIdx, EVT VT) {
 /// a signed 16-bit displacement [r+imm], and if it is not better
 /// represented as reg+reg. If \p EncodingAlignment is non-zero, only accept
 /// displacements that are multiples of that value.
-bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
-                                            SDValue &Base,
-                                            SelectionDAG &DAG,
-                                            unsigned EncodingAlignment) const {
+bool PPCTargetLowering::SelectAddressRegImm(
+    SDValue N, SDValue &Disp, SDValue &Base, SelectionDAG &DAG,
+    MaybeAlign EncodingAlignment) const {
   // FIXME dl should come from parent load or store, not from address
   SDLoc dl(N);
 

@@ -2548,7 +2547,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
   if (N.getOpcode() == ISD::ADD) {
     int16_t imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
+        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
       Disp = DAG.getTargetConstant(imm, dl, N.getValueType());
       if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(N.getOperand(0))) {
         Base = DAG.getTargetFrameIndex(FI->getIndex(), N.getValueType());

@@ -2572,7 +2571,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
   } else if (N.getOpcode() == ISD::OR) {
     int16_t imm = 0;
     if (isIntS16Immediate(N.getOperand(1), imm) &&
-        (!EncodingAlignment || (imm % EncodingAlignment) == 0)) {
+        (!EncodingAlignment || isAligned(*EncodingAlignment, imm))) {
       // If this is an or of disjoint bitfields, we can codegen this as an add
       // (for better address arithmetic) if the LHS and RHS of the OR are
       // provably disjoint.

@@ -2599,7 +2598,7 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
       // this as "d, 0"
       int16_t Imm;
       if (isIntS16Immediate(CN, Imm) &&
-          (!EncodingAlignment || (Imm % EncodingAlignment) == 0)) {
+          (!EncodingAlignment || isAligned(*EncodingAlignment, Imm))) {
         Disp = DAG.getTargetConstant(Imm, dl, CN->getValueType(0));
         Base = DAG.getRegister(Subtarget.isPPC64() ? PPC::ZERO8 : PPC::ZERO,
                                CN->getValueType(0));

@@ -2609,7 +2608,8 @@ bool PPCTargetLowering::SelectAddressRegImm(SDValue N, SDValue &Disp,
       // Handle 32-bit sext immediates with LIS + addr mode.
       if ((CN->getValueType(0) == MVT::i32 ||
            (int64_t)CN->getZExtValue() == (int)CN->getZExtValue()) &&
-          (!EncodingAlignment || (CN->getZExtValue() % EncodingAlignment) == 0)) {
+          (!EncodingAlignment ||
+           isAligned(*EncodingAlignment, CN->getZExtValue()))) {
         int Addr = (int)CN->getZExtValue();
 
         // Otherwise, break this down into an LIS + disp.

@@ -2796,14 +2796,14 @@ bool PPCTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
 
   // LDU/STU can only handle immediates that are a multiple of 4.
   if (VT != MVT::i64) {
-    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 0))
+    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, None))
       return false;
   } else {
     // LDU/STU need an address with at least 4-byte alignment.
     if (Alignment < 4)
      return false;
 
-    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, 4))
+    if (!SelectAddressRegImm(Ptr, Offset, Base, DAG, Align(4)))
      return false;
   }
 
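In the PowerPC hunks above, the unsigned sentinel "0 means no encoding requirement" becomes MaybeAlign's empty state, so the modulo test turns into a library call. Schematically (not from the patch):

    // Before: !EncodingAlignment || (Imm % EncodingAlignment) == 0
    // After:  !EncodingAlignment || isAligned(*EncodingAlignment, Imm)
    // A MaybeAlign converts to false when no alignment was specified, and
    // *EncodingAlignment is the underlying Align when one was.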
@@ -726,7 +726,7 @@ namespace llvm {
     /// Returns false if it can be represented by [r+imm], which are preferred.
     bool SelectAddressRegReg(SDValue N, SDValue &Base, SDValue &Index,
                              SelectionDAG &DAG,
-                             unsigned EncodingAlignment = 0) const;
+                             MaybeAlign EncodingAlignment = None) const;
 
     /// SelectAddressRegImm - Returns true if the address N can be represented
     /// by a base register plus a signed 16-bit displacement [r+imm], and if it

@@ -735,7 +735,7 @@ namespace llvm {
     /// requirement, i.e. multiples of 4 for DS form.
     bool SelectAddressRegImm(SDValue N, SDValue &Disp, SDValue &Base,
                              SelectionDAG &DAG,
-                             unsigned EncodingAlignment) const;
+                             MaybeAlign EncodingAlignment) const;
 
     /// SelectAddressRegRegOnly - Given the specified addressed, force it to be
     /// represented as an indexed [r+r] operation.
@@ -2549,7 +2549,8 @@ static SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG,
                                        const SparcSubtarget *Subtarget) {
   SDValue Chain = Op.getOperand(0); // Legalize the chain.
   SDValue Size = Op.getOperand(1);  // Legalize the size.
-  MaybeAlign Alignment(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
+  MaybeAlign Alignment =
+      cast<ConstantSDNode>(Op.getOperand(2))->getMaybeAlignValue();
   Align StackAlign = Subtarget->getFrameLowering()->getStackAlign();
   EVT VT = Size->getValueType(0);
   SDLoc dl(Op);
@@ -212,7 +212,7 @@ public:
   }
 
   Align getStorageAlignment() const {
-    return Align(cast<ConstantInt>(getArgOperand(AlignArg))->getZExtValue());
+    return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
   }
 
   Value *getStorage() const {

@@ -347,7 +347,7 @@ public:
   /// The required alignment of the promise. This must match the
   /// alignment of the promise alloca in the coroutine.
   Align getAlignment() const {
-    return Align(cast<ConstantInt>(getArgOperand(AlignArg))->getZExtValue());
+    return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
   }
 
   // Methods to support type inquiry through isa, cast, and dyn_cast:

@@ -468,8 +468,8 @@ public:
   Value *getSize() const {
     return getArgOperand(SizeArg);
   }
-  unsigned getAlignment() const {
-    return cast<ConstantInt>(getArgOperand(AlignArg))->getZExtValue();
+  Align getAlignment() const {
+    return cast<ConstantInt>(getArgOperand(AlignArg))->getAlignValue();
   }
 
   // Methods to support type inquiry through isa, cast, and dyn_cast:
@@ -1144,7 +1144,7 @@ Instruction *InstCombiner::simplifyMaskedStore(IntrinsicInst &II) {
   // If the mask is all ones, this is a plain vector store of the 1st argument.
   if (ConstMask->isAllOnesValue()) {
     Value *StorePtr = II.getArgOperand(1);
-    Align Alignment(cast<ConstantInt>(II.getArgOperand(2))->getZExtValue());
+    Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
     return new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
   }
 

@@ -3383,8 +3383,9 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   case Intrinsic::arm_neon_vst4lane: {
     Align MemAlign = getKnownAlignment(II->getArgOperand(0), DL, II, &AC, &DT);
     unsigned AlignArg = II->getNumArgOperands() - 1;
-    ConstantInt *IntrAlign = dyn_cast<ConstantInt>(II->getArgOperand(AlignArg));
-    if (IntrAlign && IntrAlign->getZExtValue() < MemAlign.value())
+    Value *AlignArgOp = II->getArgOperand(AlignArg);
+    MaybeAlign Align = cast<ConstantInt>(AlignArgOp)->getMaybeAlignValue();
+    if (Align && *Align < MemAlign)
       return replaceOperand(*II, AlignArg,
                             ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                              MemAlign.value(), false));

@@ -4546,11 +4547,10 @@ static void annotateAnyAllocSite(CallBase &Call, const TargetLibraryInfo *TLI) {
                                              Call.getContext(), Op1C->getZExtValue()));
     // Add alignment attribute if alignment is a power of two constant.
     if (Op0C && Op0C->getValue().ult(llvm::Value::MaximumAlignment)) {
-      uint64_t AlignmentVal = Op0C->getZExtValue();
-      if (llvm::isPowerOf2_64(AlignmentVal))
-        Call.addAttribute(AttributeList::ReturnIndex,
-                          Attribute::getWithAlignment(Call.getContext(),
-                                                      Align(AlignmentVal)));
+      if (MaybeAlign AlignmentVal = Op0C->getMaybeAlignValue())
+        Call.addAttribute(
+            AttributeList::ReturnIndex,
+            Attribute::getWithAlignment(Call.getContext(), *AlignmentVal));
     }
   } else if (isReallocLikeFn(&Call, TLI) && Op1C) {
     Call.addAttribute(AttributeList::ReturnIndex,
@@ -1378,23 +1378,22 @@ void AddressSanitizer::getInterestingMemoryOperands(
     if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
-                             LI->getType(), LI->getAlignment());
+                             LI->getType(), LI->getAlign());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
-                             SI->getValueOperand()->getType(),
-                             SI->getAlignment());
+                             SI->getValueOperand()->getType(), SI->getAlign());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
       return;
     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
-                             RMW->getValOperand()->getType(), 0);
+                             RMW->getValOperand()->getType(), None);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
       return;
     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
-                             XCHG->getCompareOperand()->getType(), 0);
+                             XCHG->getCompareOperand()->getType(), None);
   } else if (auto CI = dyn_cast<CallInst>(I)) {
     auto *F = CI->getCalledFunction();
     if (F && (F->getName().startswith("llvm.masked.load.") ||

@@ -1409,11 +1408,10 @@ void AddressSanitizer::getInterestingMemoryOperands(
       if (ignoreAccess(BasePtr))
         return;
       auto Ty = cast<PointerType>(BasePtr->getType())->getElementType();
-      unsigned Alignment = 1;
+      MaybeAlign Alignment = Align(1);
       // Otherwise no alignment guarantees. We probably got Undef.
-      if (auto AlignmentConstant =
-              dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
-        Alignment = (unsigned)AlignmentConstant->getZExtValue();
+      if (auto *Op = dyn_cast<ConstantInt>(CI->getOperand(1 + OpOffset)))
+        Alignment = Op->getMaybeAlignValue();
       Value *Mask = CI->getOperand(2 + OpOffset);
       Interesting.emplace_back(I, OpOffset, IsWrite, Ty, Alignment, Mask);
     } else {

@@ -1422,7 +1420,7 @@ void AddressSanitizer::getInterestingMemoryOperands(
           ignoreAccess(CI->getArgOperand(ArgNo)))
         continue;
       Type *Ty = CI->getParamByValType(ArgNo);
-      Interesting.emplace_back(I, ArgNo, false, Ty, 1);
+      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
     }
   }
 }

@@ -1484,7 +1482,7 @@ void AddressSanitizer::instrumentPointerComparisonOrSubtraction(
 
 static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
                                 Instruction *InsertBefore, Value *Addr,
-                                unsigned Alignment, unsigned Granularity,
+                                MaybeAlign Alignment, unsigned Granularity,
                                 uint32_t TypeSize, bool IsWrite,
                                 Value *SizeArgument, bool UseCalls,
                                 uint32_t Exp) {

@@ -1492,7 +1490,7 @@ static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
   // if the data is properly aligned.
   if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
        TypeSize == 128) &&
-      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
+      (!Alignment || *Alignment >= Granularity || *Alignment >= TypeSize / 8))
     return Pass->instrumentAddress(I, InsertBefore, Addr, TypeSize, IsWrite,
                                    nullptr, UseCalls, Exp);
   Pass->instrumentUnusualSizeOrAlignment(I, InsertBefore, Addr, TypeSize,

@@ -1502,7 +1500,7 @@ static void doInstrumentAddress(AddressSanitizer *Pass, Instruction *I,
 static void instrumentMaskedLoadOrStore(AddressSanitizer *Pass,
                                         const DataLayout &DL, Type *IntptrTy,
                                         Value *Mask, Instruction *I,
-                                        Value *Addr, unsigned Alignment,
+                                        Value *Addr, MaybeAlign Alignment,
                                         unsigned Granularity, uint32_t TypeSize,
                                         bool IsWrite, Value *SizeArgument,
                                         bool UseCalls, uint32_t Exp) {
@@ -539,30 +539,29 @@ void HWAddressSanitizer::getInterestingMemoryOperands(
     if (!ClInstrumentReads || ignoreAccess(LI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, LI->getPointerOperandIndex(), false,
-                             LI->getType(), LI->getAlignment());
+                             LI->getType(), LI->getAlign());
   } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites || ignoreAccess(SI->getPointerOperand()))
       return;
     Interesting.emplace_back(I, SI->getPointerOperandIndex(), true,
-                             SI->getValueOperand()->getType(),
-                             SI->getAlignment());
+                             SI->getValueOperand()->getType(), SI->getAlign());
   } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(RMW->getPointerOperand()))
       return;
     Interesting.emplace_back(I, RMW->getPointerOperandIndex(), true,
-                             RMW->getValOperand()->getType(), 0);
+                             RMW->getValOperand()->getType(), None);
   } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics || ignoreAccess(XCHG->getPointerOperand()))
       return;
     Interesting.emplace_back(I, XCHG->getPointerOperandIndex(), true,
-                             XCHG->getCompareOperand()->getType(), 0);
+                             XCHG->getCompareOperand()->getType(), None);
   } else if (auto CI = dyn_cast<CallInst>(I)) {
     for (unsigned ArgNo = 0; ArgNo < CI->getNumArgOperands(); ArgNo++) {
       if (!ClInstrumentByval || !CI->isByValArgument(ArgNo) ||
           ignoreAccess(CI->getArgOperand(ArgNo)))
         continue;
       Type *Ty = CI->getParamByValType(ArgNo);
-      Interesting.emplace_back(I, ArgNo, false, Ty, 1);
+      Interesting.emplace_back(I, ArgNo, false, Ty, Align(1));
     }
   }
 }

@@ -733,8 +732,8 @@ bool HWAddressSanitizer::instrumentMemAccess(InterestingMemoryOperand &O) {
   IRBuilder<> IRB(O.getInsn());
   if (isPowerOf2_64(O.TypeSize) &&
       (O.TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
-      (O.Alignment >= (1UL << Mapping.Scale) || O.Alignment == 0 ||
-       O.Alignment >= O.TypeSize / 8)) {
+      (!O.Alignment || *O.Alignment >= (1UL << Mapping.Scale) ||
+       *O.Alignment >= O.TypeSize / 8)) {
     size_t AccessSizeIndex = TypeSizeToSizeIndex(O.TypeSize);
     if (ClInstrumentWithCalls) {
       IRB.CreateCall(HwasanMemoryAccessCallback[O.IsWrite][AccessSizeIndex],
@@ -2956,7 +2956,7 @@ private:
     unsigned OffsetWidth = DL.getIndexSizeInBits(OtherAS);
     APInt OtherOffset(OffsetWidth, NewBeginOffset - BeginOffset);
     Align OtherAlign =
-        assumeAligned(IsDest ? II.getSourceAlignment() : II.getDestAlignment());
+        (IsDest ? II.getSourceAlign() : II.getDestAlign()).valueOrOne();
     OtherAlign =
         commonAlignment(OtherAlign, OtherOffset.zextOrTrunc(64).getZExtValue());
 