ARM64: remove holes from *all* HFAs on the stack.
My first attempt to make sure HFAs were contiguous was in the block dealing with padding registers, which meant it only triggered on the first stack-based HFA. This should extend it to the rest as well.

Another part of PR19432.

llvm-svn: 206456
commit 5ffc092700
parent 7e107dabd6
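For context: under AAPCS64 every stack argument slot is 64-bit aligned, so an HFA expanded into individual floats leaves a 4-byte hole behind each element once it spills to the stack, while coercing it to [N x double] keeps it contiguous. Below is a minimal standalone sketch of the slot arithmetic the patch relies on; it is not clang code, and the name stackSlotsFor is illustrative only.

// Minimal standalone sketch (not clang code) of the slot arithmetic applied
// when an HFA spills to the stack under AAPCS64.
#include <cstdint>
#include <cstdio>

// Mirrors llvm::RoundUpToAlignment(NumStackSlots, 64) / 64 from the diff:
// round a size in bits up to a whole number of 64-bit stack slots.
static uint32_t stackSlotsFor(uint32_t typeSizeInBits) {
  return (typeSizeInBits + 63) / 64;
}

int main() {
  // An HFA of four floats is 128 bits.  Coerced to [2 x double] it fills two
  // contiguous 8-byte slots; expanded into four floats, each float would start
  // its own 8-byte slot and leave a 4-byte hole behind it.
  std::printf("[%u x double]\n", (unsigned)stackSlotsFor(4 * 32));
  return 0;
}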
@@ -151,6 +151,10 @@ public:
     return PaddingType;
   }
 
+  void setPaddingType(llvm::Type *T) {
+    PaddingType = T;
+  }
+
   bool getPaddingInReg() const {
     return PaddingInReg;
   }
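The setter added above lets later code attach a register-padding type to an argument that has already been classified, instead of constructing a fresh ABIArgInfo and losing the earlier decision (such as the [N x double] coercion). A miniature standalone model of that pattern follows; Type, ArgInfo and getDirect here are stand-ins for illustration, not the real clang classes.

// Miniature model (not the real ABIArgInfo) of why the setter is useful: the
// padding step can now annotate an argument that was already classified,
// rather than rebuilding it and discarding the earlier decision.
#include <cassert>

struct Type {}; // stand-in for llvm::Type

struct ArgInfo {
  Type *CoerceTy;  // e.g. the [N x double] chosen by the coercion step
  Type *PaddingTy; // registers to mark as unavailable

  static ArgInfo getDirect(Type *Coerce) { return ArgInfo{Coerce, nullptr}; }
  void setPaddingType(Type *T) { PaddingTy = T; } // what the hunk above adds
};

int main() {
  Type DoubleArray, FloatPadding;

  ArgInfo Info = ArgInfo::getDirect(&DoubleArray); // classification survives...
  Info.setPaddingType(&FloatPadding);              // ...after padding is attached

  assert(Info.CoerceTy == &DoubleArray && Info.PaddingTy == &FloatPadding);
  return 0;
}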
@@ -3183,26 +3183,28 @@ private:
       const unsigned NumGPRs = 8;
       it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
                                       AllocatedGPR, IsSmallAggr);
+
+      // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
+      // as sequences of floats since they'll get "holes" inserted as
+      // padding by the back end.
+      if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS()) {
+        uint32_t NumStackSlots = getContext().getTypeSize(it->type);
+        NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
+
+        llvm::Type *CoerceTy = llvm::ArrayType::get(
+            llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
+        it->info = ABIArgInfo::getDirect(CoerceTy);
+      }
+
       // If we do not have enough VFP registers for the HA, any VFP registers
       // that are unallocated are marked as unavailable. To achieve this, we add
       // padding of (NumVFPs - PreAllocation) floats.
       if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
         llvm::Type *PaddingTy = llvm::ArrayType::get(
             llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
         if (isDarwinPCS())
           it->info = ABIArgInfo::getExpandWithPadding(false, PaddingTy);
-        else {
-          // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
-          // as sequences of floats since they'll get "holes" inserted as
-          // padding by the back end.
-          uint32_t NumStackSlots = getContext().getTypeSize(it->type);
-          NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
-
-          llvm::Type *CoerceTy = llvm::ArrayType::get(
-              llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
-          it->info = ABIArgInfo::getDirect(CoerceTy, 0, PaddingTy);
-        }
+        it->info.setPaddingType(PaddingTy);
       }
 
       // If we do not have enough GPRs for the small aggregate, any GPR regs
       // that are unallocated are marked as unavailable.
       if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
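Why the first attempt only caught the first stack-based HFA: the padding block above is guarded by PreAllocation < NumVFPs, which is true only for the argument that straddles the register/stack boundary; every later stack-based HFA starts with all eight VFP registers already spoken for, so the coercion that used to live inside that block never ran for it. A rough standalone simulation of that bookkeeping for a signature like test3 below (not clang code; the accounting is simplified):

// Standalone simulation of the s0-s7 accounting for a call like
// test3(HFA, float, HFA, HFA) with HFA = { float arr[4]; }.
#include <cstdio>

int main() {
  const unsigned NumVFPs = 8;
  unsigned AllocatedVFP = 0;
  const unsigned Members[] = {4, 1, 4, 4}; // s0_s3, s4, sp, sp16 in test3

  for (unsigned M : Members) {
    unsigned PreAllocation = AllocatedVFP; // snapshot, as in the real loop
    AllocatedVFP += M;                     // rough stand-in for classifyArgumentType
    bool OnStack = AllocatedVFP > NumVFPs; // no longer fits in s0-s7
    bool PaddingBlock = OnStack && PreAllocation < NumVFPs;
    std::printf("members=%u on-stack=%d padding-block=%d\n", M, OnStack,
                PaddingBlock);
  }
  // Only the third argument (sp) satisfies the padding-block condition; the
  // fourth (sp16) is also on the stack but is reached only by the new
  // standalone check.
  return 0;
}

The same numbers explain the [3 x float] padding in the test's CHECK line: when sp spills, 8 - 5 = 3 VFP registers remain to be marked unavailable.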
@@ -12,3 +12,12 @@ void test1(int x0, __int128 x2_x3, __int128 x4_x5, __int128 x6_x7, Small sp) {
 // CHECK: void @test2(i32 %x0, i128 %x2_x3.coerce, i32 %x4, i128 %x6_x7.coerce, i32 %sp, i128 %sp16.coerce)
 void test2(int x0, Small x2_x3, int x4, Small x6_x7, int sp, Small sp16) {
 }
+
+// We coerce HFAs into a contiguous [N x double] type if they're going on the
+// stack in order to avoid holes. Make sure we get all of them, and not just the
+// first:
+
+// CHECK: void @test3(float %s0_s3.0, float %s0_s3.1, float %s0_s3.2, float %s0_s3.3, float %s4, [3 x float], [2 x double] %sp.coerce, [2 x double] %sp16.coerce)
+typedef struct { float arr[4]; } HFA;
+void test3(HFA s0_s3, float s4, HFA sp, HFA sp16) {
+}