[Alignment][NFC] Migrate AArch64, ARM, Hexagon, MSP430 and NVPTX backends to Align

This patch is part of a series to introduce an Alignment type.
See this thread for context: http://lists.llvm.org/pipermail/llvm-dev/2019-July/133851.html
See this patch for the introduction of the type: https://reviews.llvm.org/D64790

Differential Revision: https://reviews.llvm.org/D82749
commit 4f5133a4dc (parent ed4328c607)
Author: Guillaume Chatelet
Date:   2020-06-30 07:49:21 +00:00

8 changed files with 66 additions and 66 deletions
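For orientation (an editor's note, not part of the commit): the series replaces raw unsigned alignments with two types from llvm/Support/Alignment.h. Align models a known, non-zero, power-of-two alignment and stores it as its log2; MaybeAlign is an optional Align that replaces the old "0 means unset" convention. A minimal sketch of their behavior, assuming the Alignment.h of this era:

    #include "llvm/Support/Alignment.h"
    using llvm::Align;
    using llvm::MaybeAlign;

    void sketch() {
      Align A(16);            // must be a non-zero power of two, else it asserts
      MaybeAlign M;           // empty; replaces the old "alignment == 0" sentinel
      if (M)                  // explicit presence check instead of comparing to 0
        (void)M->value();     // unwraps to the underlying byte count (uint64_t)
      (void)(A >= Align(8));  // alignments compare by byte value
    }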


@@ -2519,8 +2519,8 @@ void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
     const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
     const TargetRegisterClass &RC = AArch64::GPR64RegClass;
     unsigned Size = TRI->getSpillSize(RC);
-    unsigned Align = TRI->getSpillAlignment(RC);
-    int FI = MFI.CreateStackObject(Size, Align, false);
+    Align Alignment = TRI->getSpillAlign(RC);
+    int FI = MFI.CreateStackObject(Size, Alignment, false);
     RS->addScavengingFrameIndex(FI);
     LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
                       << " as the emergency spill slot.\n");


@@ -3318,9 +3318,9 @@ SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
       return LowerFixedLengthVectorStoreToSVE(Op, DAG);
 
     unsigned AS = StoreNode->getAddressSpace();
-    unsigned Align = StoreNode->getAlignment();
-    if (Align < MemVT.getStoreSize() &&
-        !allowsMisalignedMemoryAccesses(MemVT, AS, Align,
+    Align Alignment = StoreNode->getAlign();
+    if (Alignment < MemVT.getStoreSize() &&
+        !allowsMisalignedMemoryAccesses(MemVT, AS, Alignment.value(),
                                         StoreNode->getMemOperand()->getFlags(),
                                         nullptr)) {
       return scalarizeVectorStore(StoreNode, DAG);
@@ -4427,9 +4427,9 @@ AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                "Only scalable vectors can be passed indirectly");
         MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
         Type *Ty = EVT(VA.getValVT()).getTypeForEVT(*DAG.getContext());
-        unsigned Align = DAG.getDataLayout().getPrefTypeAlignment(Ty);
+        Align Alignment = DAG.getDataLayout().getPrefTypeAlign(Ty);
         int FI = MFI.CreateStackObject(
-            VA.getValVT().getStoreSize().getKnownMinSize(), Align, false);
+            VA.getValVT().getStoreSize().getKnownMinSize(), Alignment, false);
         MFI.setStackID(FI, TargetStackID::SVEVector);
         SDValue SpillSlot = DAG.getFrameIndex(
@@ -6095,7 +6095,7 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
   SDValue Chain = Op.getOperand(0);
   SDValue Addr = Op.getOperand(1);
-  unsigned Align = Op.getConstantOperandVal(3);
+  MaybeAlign Align(Op.getConstantOperandVal(3));
   unsigned MinSlotSize = Subtarget->isTargetILP32() ? 4 : 8;
   auto PtrVT = getPointerTy(DAG.getDataLayout());
   auto PtrMemVT = getPointerMemTy(DAG.getDataLayout());
@@ -6104,12 +6104,11 @@ SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   Chain = VAList.getValue(1);
   VAList = DAG.getZExtOrTrunc(VAList, DL, PtrVT);
 
-  if (Align > MinSlotSize) {
-    assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2");
+  if (Align && *Align > MinSlotSize) {
     VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
-                         DAG.getConstant(Align - 1, DL, PtrVT));
+                         DAG.getConstant(Align->value() - 1, DL, PtrVT));
     VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
-                         DAG.getConstant(-(int64_t)Align, DL, PtrVT));
+                         DAG.getConstant(-(int64_t)Align->value(), DL, PtrVT));
   }
 
   Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
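The LowerVAARG hunk above also shows why the power-of-two assert could be deleted: Align guarantees that invariant by construction. The ADD/AND node pair implements the usual round-up-to-alignment idiom; a sketch on plain integers (in-tree, llvm::alignTo computes the same thing):

    #include <cassert>
    #include <cstdint>

    uint64_t roundUp(uint64_t Addr, uint64_t Alignment) {
      assert(Alignment != 0 && (Alignment & (Alignment - 1)) == 0 &&
             "power of two expected; llvm::Align enforces this by construction");
      return (Addr + Alignment - 1) & -Alignment;
    }
    // e.g. roundUp(0x1003, 8) == 0x1008, roundUp(0x1000, 8) == 0x1000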
@@ -9110,7 +9109,7 @@ AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
   SDNode *Node = Op.getNode();
   SDValue Chain = Op.getOperand(0);
   SDValue Size = Op.getOperand(1);
-  unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+  MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
   EVT VT = Node->getValueType(0);
 
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
@@ -9120,7 +9119,7 @@ AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
     SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
     if (Align)
       SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
-                       DAG.getConstant(-(uint64_t)Align, dl, VT));
+                       DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
     Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
     SDValue Ops[2] = {SP, Chain};
     return DAG.getMergeValues(Ops, dl);
@@ -9135,7 +9134,7 @@ AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
   SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size);
   if (Align)
     SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0),
-                     DAG.getConstant(-(uint64_t)Align, dl, VT));
+                     DAG.getConstant(-(uint64_t)Align->value(), dl, VT));
   Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP);
 
   Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true),
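The two LowerDYNAMIC_STACKALLOC hunks above (and the ARM one below) emit the mirror-image idiom: subtract the object size from SP, then mask SP down to the requested boundary; since stacks grow downward, rounding down only over-aligns. A sketch of what the SUB/AND nodes compute:

    #include <cstdint>

    uint64_t allocaSP(uint64_t SP, uint64_t Size, uint64_t Alignment /* power of two */) {
      SP -= Size;       // ISD::SUB: carve out the object
      SP &= -Alignment; // ISD::AND: round down to the alignment boundary
      return SP;
    }

Note that a MaybeAlign built from 0 is an empty optional, so the old if (Align) guard keeps its meaning after the migration.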


@@ -2144,8 +2144,9 @@ void ARMFrameLowering::determineCalleeSaves(MachineFunction &MF,
         LLVM_DEBUG(dbgs() << "Reserving emergency spill slot\n");
         const TargetRegisterClass &RC = ARM::GPRRegClass;
         unsigned Size = TRI->getSpillSize(RC);
-        unsigned Align = TRI->getSpillAlignment(RC);
-        RS->addScavengingFrameIndex(MFI.CreateStackObject(Size, Align, false));
+        Align Alignment = TRI->getSpillAlign(RC);
+        RS->addScavengingFrameIndex(
+            MFI.CreateStackObject(Size, Alignment, false));
       }
     }
   }


@@ -1726,7 +1726,7 @@ bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
   EVT LoadedVT;
   unsigned Opcode = 0;
   bool isSExtLd, isPre;
-  unsigned Align;
+  Align Alignment;
   ARMVCC::VPTCodes Pred;
   SDValue PredReg;
   SDValue Chain, Base, Offset;
@@ -1742,7 +1742,7 @@ bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
     Chain = LD->getChain();
     Base = LD->getBasePtr();
     Offset = LD->getOffset();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
     isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
     Pred = ARMVCC::None;
@@ -1758,7 +1758,7 @@ bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
     Chain = LD->getChain();
     Base = LD->getBasePtr();
     Offset = LD->getOffset();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSExtLd = LD->getExtensionType() == ISD::SEXTLOAD;
     isPre = (AM == ISD::PRE_INC) || (AM == ISD::PRE_DEC);
     Pred = ARMVCC::Then;
@@ -1772,7 +1772,7 @@ bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
   bool CanChangeType = Subtarget->isLittle() && !isa<MaskedLoadSDNode>(N);
 
   SDValue NewOffset;
-  if (Align >= 2 && LoadedVT == MVT::v4i16 &&
+  if (Alignment >= Align(2) && LoadedVT == MVT::v4i16 &&
       SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1)) {
     if (isSExtLd)
       Opcode = isPre ? ARM::MVE_VLDRHS32_pre : ARM::MVE_VLDRHS32_post;
@@ -1790,12 +1790,12 @@ bool ARMDAGToDAGISel::tryMVEIndexedLoad(SDNode *N) {
       Opcode = isPre ? ARM::MVE_VLDRBS32_pre : ARM::MVE_VLDRBS32_post;
     else
       Opcode = isPre ? ARM::MVE_VLDRBU32_pre : ARM::MVE_VLDRBU32_post;
-  } else if (Align >= 4 &&
+  } else if (Alignment >= Align(4) &&
              (CanChangeType || LoadedVT == MVT::v4i32 ||
               LoadedVT == MVT::v4f32) &&
              SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 2))
     Opcode = isPre ? ARM::MVE_VLDRWU32_pre : ARM::MVE_VLDRWU32_post;
-  else if (Align >= 2 &&
+  else if (Alignment >= Align(2) &&
            (CanChangeType || LoadedVT == MVT::v8i16 ||
             LoadedVT == MVT::v8f16) &&
            SelectT2AddrModeImm7Offset(N, Offset, NewOffset, 1))
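A note on the comparisons above (editorial): Alignment >= Align(2) compares two alignments by byte value. The Alignment.h of this period also still offered comparisons between an Align and a raw integer, which the ARMISelLowering hunks below rely on (Alignment >= 4); those integer overloads were later phased out in favor of the explicit Align(N) spelling. A sketch assuming that era's header:

    #include "llvm/Support/Alignment.h"

    bool atLeastHalfword(llvm::Align A) {
      bool ViaAlign = A >= llvm::Align(2); // spelling used in this file
      bool ViaInt = A >= 2;                // spelling used in ARMISelLowering below
      return ViaAlign && ViaInt;           // the two always agree
    }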


@@ -16827,7 +16827,7 @@ static bool getT2IndexedAddressParts(SDNode *Ptr, EVT VT,
   return false;
 }
 
-static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, unsigned Align,
+static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, Align Alignment,
                                       bool isSEXTLoad, bool IsMasked, bool isLE,
                                       SDValue &Base, SDValue &Offset,
                                       bool &isInc, SelectionDAG &DAG) {
@@ -16862,16 +16862,16 @@ static bool getMVEIndexedAddressParts(SDNode *Ptr, EVT VT, unsigned Align,
   // (in BE/masked) type.
   Base = Ptr->getOperand(0);
   if (VT == MVT::v4i16) {
-    if (Align >= 2 && IsInRange(RHSC, 0x80, 2))
+    if (Alignment >= 2 && IsInRange(RHSC, 0x80, 2))
       return true;
   } else if (VT == MVT::v4i8 || VT == MVT::v8i8) {
     if (IsInRange(RHSC, 0x80, 1))
       return true;
-  } else if (Align >= 4 &&
+  } else if (Alignment >= 4 &&
              (CanChangeType || VT == MVT::v4i32 || VT == MVT::v4f32) &&
              IsInRange(RHSC, 0x80, 4))
     return true;
-  else if (Align >= 2 &&
+  else if (Alignment >= 2 &&
            (CanChangeType || VT == MVT::v8i16 || VT == MVT::v8f16) &&
            IsInRange(RHSC, 0x80, 2))
     return true;
@@ -16893,28 +16893,28 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
   EVT VT;
   SDValue Ptr;
-  unsigned Align;
+  Align Alignment;
   bool isSEXTLoad = false;
   bool IsMasked = false;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
     VT = ST->getMemoryVT();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
     Ptr = LD->getBasePtr();
     VT = LD->getMemoryVT();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     IsMasked = true;
   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
     Ptr = ST->getBasePtr();
     VT = ST->getMemoryVT();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
     IsMasked = true;
   } else
     return false;
@@ -16923,9 +16923,9 @@ ARMTargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
   bool isLegal = false;
   if (VT.isVector())
     isLegal = Subtarget->hasMVEIntegerOps() &&
-              getMVEIndexedAddressParts(Ptr.getNode(), VT, Align, isSEXTLoad,
-                                        IsMasked, Subtarget->isLittle(), Base,
-                                        Offset, isInc, DAG);
+              getMVEIndexedAddressParts(
+                  Ptr.getNode(), VT, Alignment, isSEXTLoad, IsMasked,
+                  Subtarget->isLittle(), Base, Offset, isInc, DAG);
   else {
     if (Subtarget->isThumb2())
       isLegal = getT2IndexedAddressParts(Ptr.getNode(), VT, isSEXTLoad, Base,
@@ -16951,31 +16951,31 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                    SelectionDAG &DAG) const {
   EVT VT;
   SDValue Ptr;
-  unsigned Align;
+  Align Alignment;
   bool isSEXTLoad = false, isNonExt;
   bool IsMasked = false;
   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
     VT = LD->getMemoryVT();
     Ptr = LD->getBasePtr();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
     VT = ST->getMemoryVT();
     Ptr = ST->getBasePtr();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
     isNonExt = !ST->isTruncatingStore();
   } else if (MaskedLoadSDNode *LD = dyn_cast<MaskedLoadSDNode>(N)) {
     VT = LD->getMemoryVT();
     Ptr = LD->getBasePtr();
-    Align = LD->getAlignment();
+    Alignment = LD->getAlign();
     isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
     isNonExt = LD->getExtensionType() == ISD::NON_EXTLOAD;
     IsMasked = true;
   } else if (MaskedStoreSDNode *ST = dyn_cast<MaskedStoreSDNode>(N)) {
     VT = ST->getMemoryVT();
     Ptr = ST->getBasePtr();
-    Align = ST->getAlignment();
+    Alignment = ST->getAlign();
     isNonExt = !ST->isTruncatingStore();
     IsMasked = true;
   } else
     return false;
@@ -17001,7 +17001,7 @@ bool ARMTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
   bool isLegal = false;
   if (VT.isVector())
     isLegal = Subtarget->hasMVEIntegerOps() &&
-              getMVEIndexedAddressParts(Op, VT, Align, isSEXTLoad, IsMasked,
+              getMVEIndexedAddressParts(Op, VT, Alignment, isSEXTLoad, IsMasked,
                                         Subtarget->isLittle(), Base, Offset,
                                         isInc, DAG);
   else {
@@ -17758,13 +17758,14 @@ ARMTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const
   if (DAG.getMachineFunction().getFunction().hasFnAttribute(
           "no-stack-arg-probe")) {
-    unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
+    MaybeAlign Align(cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());
     SDValue SP = DAG.getCopyFromReg(Chain, DL, ARM::SP, MVT::i32);
     Chain = SP.getValue(1);
     SP = DAG.getNode(ISD::SUB, DL, MVT::i32, SP, Size);
     if (Align)
-      SP = DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
-                       DAG.getConstant(-(uint64_t)Align, DL, MVT::i32));
+      SP =
+          DAG.getNode(ISD::AND, DL, MVT::i32, SP.getValue(0),
+                      DAG.getConstant(-(uint64_t)Align->value(), DL, MVT::i32));
     Chain = DAG.getCopyToReg(Chain, DL, ARM::SP, SP);
     SDValue Ops[2] = { SP, Chain };
     return DAG.getMergeValues(Ops, DL);


@@ -107,7 +107,7 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) {
   Register AR =
       MF.getInfo<HexagonMachineFunctionInfo>()->getStackAlignBaseVReg();
   std::map<unsigned, SmallVector<MachineInstr*,4>> VExtractMap;
-  unsigned MaxAlign = 0;
+  MaybeAlign MaxAlign;
   bool Changed = false;
 
   for (MachineBasicBlock &MBB : MF) {
@@ -137,14 +137,14 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) {
       continue;
 
     const auto &VecRC = *MRI.getRegClass(VecR);
-    unsigned Align = HRI.getSpillAlignment(VecRC);
-    MaxAlign = std::max(MaxAlign, Align);
+    Align Alignment = HRI.getSpillAlign(VecRC);
+    MaxAlign = max(MaxAlign, Alignment);
     // Make sure this is not a spill slot: spill slots cannot be aligned
     // if there are variable-sized objects on the stack. They must be
     // accessible via FP (which is not aligned), because SP is unknown,
     // and AP may not be available at the location of the load/store.
-    int FI = MFI.CreateStackObject(HRI.getSpillSize(VecRC), Align,
-                                   /*isSpillSlot*/false);
+    int FI = MFI.CreateStackObject(HRI.getSpillSize(VecRC), Alignment,
+                                   /*isSpillSlot*/ false);
     MachineInstr *DefI = MRI.getVRegDef(VecR);
     MachineBasicBlock::iterator At = std::next(DefI->getIterator());
@@ -178,13 +178,13 @@ bool HexagonVExtract::runOnMachineFunction(MachineFunction &MF) {
     }
   }
 
-  if (AR) {
+  if (AR && MaxAlign) {
     // Update the required stack alignment.
     MachineInstr *AlignaI = MRI.getVRegDef(AR);
     assert(AlignaI->getOpcode() == Hexagon::PS_aligna);
     MachineOperand &Op = AlignaI->getOperand(1);
-    if (MaxAlign > Op.getImm())
-      Op.setImm(MaxAlign);
+    if (*MaxAlign > Op.getImm())
+      Op.setImm(MaxAlign->value());
   }
 
   return Changed;
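The Hexagon change folds each vector register class's spill alignment into an optional running maximum (max(MaybeAlign, Align) appears to resolve to a helper Alignment.h provided at the time), and the strengthened if (AR && MaxAlign) guard keeps the PS_aligna update from firing when no vector spill was ever seen. A sketch of the same accumulation written with explicit optional handling:

    #include "llvm/Support/Alignment.h"
    #include <algorithm>
    #include <cstdint>
    using llvm::Align;
    using llvm::MaybeAlign;

    void demo() {
      MaybeAlign MaxAlign;                  // empty: nothing accumulated yet
      for (Align A : {Align(8), Align(64)})
        MaxAlign = MaxAlign ? std::max(*MaxAlign, A) : A;
      uint64_t Imm = 0;                     // stands in for the PS_aligna immediate
      if (MaxAlign && MaxAlign->value() > Imm)
        Imm = MaxAlign->value();            // 64 after the loop above
    }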


@@ -50,7 +50,7 @@ namespace {
     const BlockAddress *BlockAddr = nullptr;
     const char *ES = nullptr;
     int JT = -1;
-    unsigned Align = 0;    // CP alignment.
+    Align Alignment;       // CP alignment.
 
     MSP430ISelAddressMode() = default;
@@ -74,12 +74,12 @@ namespace {
       } else if (CP) {
         errs() << " CP ";
         CP->dump();
-        errs() << " Align" << Align << '\n';
+        errs() << " Align" << Alignment.value() << '\n';
       } else if (ES) {
         errs() << "ES ";
         errs() << ES << '\n';
       } else if (JT != -1)
-        errs() << " JT" << JT << " Align" << Align << '\n';
+        errs() << " JT" << JT << " Align" << Alignment.value() << '\n';
     }
 #endif
   };
@@ -146,7 +146,7 @@ bool MSP430DAGToDAGISel::MatchWrapper(SDValue N, MSP430ISelAddressMode &AM) {
     //AM.SymbolFlags = G->getTargetFlags();
   } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
     AM.CP = CP->getConstVal();
-    AM.Align = CP->getAlign().value();
+    AM.Alignment = CP->getAlign();
     AM.Disp += CP->getOffset();
     //AM.SymbolFlags = CP->getTargetFlags();
   } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
@@ -263,8 +263,8 @@ bool MSP430DAGToDAGISel::SelectAddr(SDValue N,
                                          MVT::i16, AM.Disp,
                                          0 /*AM.SymbolFlags*/);
   else if (AM.CP)
-    Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i16, Align(AM.Align),
-                                         AM.Disp, 0 /*AM.SymbolFlags*/);
+    Disp = CurDAG->getTargetConstantPool(AM.CP, MVT::i16, AM.Alignment, AM.Disp,
+                                         0 /*AM.SymbolFlags*/);
   else if (AM.ES)
     Disp = CurDAG->getTargetExternalSymbol(AM.ES, MVT::i16, 0/*AM.SymbolFlags*/);
   else if (AM.JT != -1)
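One subtlety in the MSP430ISelAddressMode hunks (editorial note): the old field used unsigned Align = 0 as an "unset" marker, whereas a default-constructed Align is a valid 1-byte alignment. That seems harmless here because the field is only read after MatchWrapper populates it from a constant-pool node, but it is the kind of semantic shift this series has to watch for:

    #include "llvm/Support/Alignment.h"

    llvm::Align DefaultAlign;  // value() == 1: smallest alignment, not "unknown"
    static_assert(sizeof(llvm::Align) == 1, "stored as a log2 shift value");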


@@ -2302,10 +2302,10 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
   MemSDNode *MemSD = cast<MemSDNode>(N);
   const DataLayout &TD = DAG.getDataLayout();
 
-  unsigned Align = MemSD->getAlignment();
-  unsigned PrefAlign =
-      TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
-  if (Align < PrefAlign) {
+  Align Alignment = MemSD->getAlign();
+  Align PrefAlign =
+      TD.getPrefTypeAlign(ValVT.getTypeForEVT(*DAG.getContext()));
+  if (Alignment < PrefAlign) {
     // This store is not sufficiently aligned, so bail out and let this vector
     // store be scalarized. Note that we may still be able to emit smaller
     // vector stores. For example, if we are storing a <4 x float> with an
@@ -4791,11 +4791,10 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
   LoadSDNode *LD = cast<LoadSDNode>(N);
 
-  unsigned Align = LD->getAlignment();
+  Align Alignment = LD->getAlign();
   auto &TD = DAG.getDataLayout();
-  unsigned PrefAlign =
-      TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
-  if (Align < PrefAlign) {
+  Align PrefAlign = TD.getPrefTypeAlign(ResVT.getTypeForEVT(*DAG.getContext()));
+  if (Alignment < PrefAlign) {
     // This load is not sufficiently aligned, so bail out and let this vector
     // load be scalarized. Note that we may still be able to emit smaller
     // vector loads. For example, if we are loading a <4 x float> with an
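Both NVPTX hunks reduce to the same predicate: compare the access's actual alignment with the preferred alignment DataLayout reports for the type, and fall back to a scalarized lowering when under-aligned; with Align on both sides the comparison is still by byte count. A sketch of the shared check, assuming a DataLayout and Type in scope:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/Support/Alignment.h"

    bool shouldScalarize(llvm::Align Actual, const llvm::DataLayout &TD,
                         llvm::Type *Ty) {
      return Actual < TD.getPrefTypeAlign(Ty); // under-aligned: let it scalarize
    }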