[Alignment][NFC] Migrate part of Arm/AArch64 backend

Summary: Follow up on D81196

Reviewers: courbet

Subscribers: kristof.beyls, hiraditya, danielkiss, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D81274
Guillaume Chatelet, 2020-06-05 16:04:42 +00:00
commit be4f5061ea (parent 7432fb2c78)
2 changed files with 20 additions and 23 deletions
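
Like D81196, this patch replaces raw unsigned byte counts with llvm::Align (from llvm/Support/Alignment.h): an Align wraps a power-of-two value, Align values are totally ordered so std::min/std::max work on them directly, and value() is only needed at the remaining integer-only spots (e.g. the alignTo on register indices below). A standalone sketch of that pattern, assuming nothing beyond the public Alignment.h API (bumpOffset and clampToStackAlign are made-up helpers, not code from the patch):

#include "llvm/Support/Alignment.h"
#include <algorithm>
#include <cstdint>

using llvm::Align;

// Made-up helper mirroring the migration pattern: take an Align instead of an
// unsigned, so callers write Align(8) rather than a bare 8.
static uint64_t bumpOffset(uint64_t Offset, uint64_t Size, Align A) {
  // llvm::alignTo has an overload taking Align directly; no .value() needed.
  return llvm::alignTo(Offset, A) + Size;
}

static Align clampToStackAlign(Align Requested, Align StackAlign) {
  // Align is totally ordered, so std::min works without unpacking to integers.
  return std::min(Requested, StackAlign);
}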


@@ -38,18 +38,17 @@ static const MCPhysReg QRegList[] = {AArch64::Q0, AArch64::Q1, AArch64::Q2,

 static bool finishStackBlock(SmallVectorImpl<CCValAssign> &PendingMembers,
                              MVT LocVT, ISD::ArgFlagsTy &ArgFlags,
-                             CCState &State, unsigned SlotAlign) {
+                             CCState &State, Align SlotAlign) {
   unsigned Size = LocVT.getSizeInBits() / 8;
   const Align StackAlign =
       State.getMachineFunction().getDataLayout().getStackAlignment();
   const Align OrigAlign(ArgFlags.getOrigAlign());
-  const Align Align = std::min(OrigAlign, StackAlign);
+  const Align Alignment = std::min(OrigAlign, StackAlign);

   for (auto &It : PendingMembers) {
-    It.convertToMem(State.AllocateStack(
-        Size, std::max((unsigned)Align.value(), SlotAlign)));
+    It.convertToMem(State.AllocateStack(Size, std::max(Alignment, SlotAlign)));
     State.addLoc(It);
-    SlotAlign = 1;
+    SlotAlign = Align(1);
   }

   // All pending members have now been allocated
@@ -72,7 +71,7 @@ static bool CC_AArch64_Custom_Stack_Block(
   if (!ArgFlags.isInConsecutiveRegsLast())
     return true;

-  return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, 8);
+  return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, Align(8));
 }

 /// Given an [N x Ty] block, it should be passed in a consecutive sequence of
@@ -146,7 +145,7 @@ static bool CC_AArch64_Custom_Block(unsigned &ValNo, MVT &ValVT, MVT &LocVT,
   for (auto Reg : RegList)
     State.AllocateReg(Reg);

-  unsigned SlotAlign = Subtarget.isTargetDarwin() ? 1 : 8;
+  const Align SlotAlign = Subtarget.isTargetDarwin() ? Align(1) : Align(8);

   return finishStackBlock(PendingMembers, LocVT, ArgFlags, State, SlotAlign);
 }
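
The finishStackBlock() change above relies on two properties of Align: std::max(Alignment, SlotAlign) compares Align values directly, removing the old (unsigned)Align.value() round-trip, and Align(1) is the weakest possible alignment, so resetting SlotAlign to Align(1) after the first member lets later members pack without extra padding. A minimal standalone check of those two assumptions (illustrative only, not part of the patch):

#include "llvm/Support/Alignment.h"
#include <algorithm>
#include <cassert>

using llvm::Align;

int main() {
  Align SlotAlign(8);          // non-Darwin slot alignment, as in Align(8) above
  const Align MemberAlign(4);
  // First member: honour at least the 8-byte slot alignment.
  assert(std::max(MemberAlign, SlotAlign).value() == 8);
  // Later members: SlotAlign drops to Align(1), so only the member's own
  // alignment matters and no padding is inserted between slots.
  SlotAlign = Align(1);
  assert(std::max(MemberAlign, SlotAlign).value() == 4);
  return 0;
}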


@@ -32,9 +32,8 @@ static bool f64AssignAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
       return false;

     // Put the whole thing on the stack.
-    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
-                                           State.AllocateStack(8, 4),
-                                           LocVT, LocInfo));
+    State.addLoc(CCValAssign::getCustomMem(
+        ValNo, ValVT, State.AllocateStack(8, Align(4)), LocVT, LocInfo));
     return true;
   }

@@ -42,9 +41,8 @@ static bool f64AssignAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
   if (unsigned Reg = State.AllocateReg(RegList))
     State.addLoc(CCValAssign::getCustomReg(ValNo, ValVT, Reg, LocVT, LocInfo));
   else
-    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
-                                           State.AllocateStack(4, 4),
-                                           LocVT, LocInfo));
+    State.addLoc(CCValAssign::getCustomMem(
+        ValNo, ValVT, State.AllocateStack(4, Align(4)), LocVT, LocInfo));
   return true;
 }

@@ -81,9 +79,8 @@ static bool f64AssignAAPCS(unsigned ValNo, MVT ValVT, MVT LocVT,
       return false;

     // Put the whole thing on the stack.
-    State.addLoc(CCValAssign::getCustomMem(ValNo, ValVT,
-                                           State.AllocateStack(8, 8),
-                                           LocVT, LocInfo));
+    State.addLoc(CCValAssign::getCustomMem(
+        ValNo, ValVT, State.AllocateStack(8, Align(8)), LocVT, LocInfo));
     return true;
   }

@@ -193,8 +190,9 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,
   // Try to allocate a contiguous block of registers, each of the correct
   // size to hold one member.
   auto &DL = State.getMachineFunction().getDataLayout();
-  unsigned StackAlign = DL.getStackAlignment().value();
-  unsigned Align = std::min(PendingMembers[0].getExtraInfo(), StackAlign);
+  const Align StackAlign = DL.getStackAlignment();
+  const Align FirstMemberAlign(PendingMembers[0].getExtraInfo());
+  Align Alignment = std::min(FirstMemberAlign, StackAlign);

   ArrayRef<MCPhysReg> RegList;
   switch (LocVT.SimpleTy) {
@@ -204,7 +202,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,

     // First consume all registers that would give an unaligned object. Whether
     // we go on stack or in regs, no-one will be using them in future.
-    unsigned RegAlign = alignTo(Align, 4) / 4;
+    unsigned RegAlign = alignTo(Alignment.value(), 4) / 4;
     while (RegIdx % RegAlign != 0 && RegIdx < RegList.size())
       State.AllocateReg(RegList[RegIdx++]);

@@ -247,7 +245,7 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,
     unsigned RegIdx = State.getFirstUnallocated(RegList);
     for (auto &It : PendingMembers) {
       if (RegIdx >= RegList.size())
-        It.convertToMem(State.AllocateStack(Size, Size));
+        It.convertToMem(State.AllocateStack(Size, Align(Size)));
       else
         It.convertToReg(State.AllocateReg(RegList[RegIdx++]));

@@ -265,12 +263,12 @@ static bool CC_ARM_AAPCS_Custom_Aggregate(unsigned ValNo, MVT ValVT,
   // After the first item has been allocated, the rest are packed as tightly as
   // possible. (E.g. an incoming i64 would have starting Align of 8, but we'll
   // be allocating a bunch of i32 slots).
-  unsigned RestAlign = std::min(Align, Size);
+  const Align RestAlign = std::min(Alignment, Align(Size));

   for (auto &It : PendingMembers) {
-    It.convertToMem(State.AllocateStack(Size, Align));
+    It.convertToMem(State.AllocateStack(Size, Alignment));
     State.addLoc(It);
-    Align = RestAlign;
+    Alignment = RestAlign;
   }

   // All pending members have now been allocated
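
The "packed as tightly as possible" comment in the last hunk is easiest to see with numbers. Below is a hedged, standalone walk-through of the offsets that loop would produce for an incoming i64 split into two i32 members, assuming the next free stack byte is 4 (plain arithmetic with llvm::alignTo; CCState itself is not involved):

#include "llvm/Support/Alignment.h"
#include <algorithm>
#include <cassert>
#include <cstdint>

using llvm::Align;

int main() {
  uint64_t Offset = 4;       // next free stack byte (assumed)
  const unsigned Size = 4;   // each member is an i32 slot
  Align Alignment(8);        // starting alignment of the incoming i64 block
  const Align RestAlign = std::min(Alignment, Align(Size)); // Align(4)

  // First member: 8-byte aligned, so it lands at offset 8.
  Offset = llvm::alignTo(Offset, Alignment);
  assert(Offset == 8);
  Offset += Size;            // next free byte is now 12
  Alignment = RestAlign;

  // Second member: only 4-byte aligned now, so it packs right behind at 12.
  Offset = llvm::alignTo(Offset, Alignment);
  assert(Offset == 12);
  Offset += Size;            // next free byte is now 16
  return 0;
}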