[AArch64][GlobalISel] Support the 'returned' parameter attribute.
On AArch64 (which seems to be the only target that supports it), this attribute allows codegen to avoid saving/restoring the value in x0 across a call. Gives a 0.1% geomean -Os code size improvement on CTMark.

Differential Revision: https://reviews.llvm.org/D96099
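To illustrate what the attribute buys, here is a minimal IR sketch with hypothetical function names (not taken from this patch): when a pointer parameter is marked `returned`, the callee is guaranteed to return that argument, so a caller that still needs the value after the call can keep using its own copy in x0 instead of saving the call's result and restoring it. On AArch64 this is modelled by giving the call the x0-preserving csr_aarch64_aapcs_thisreturn register mask, as the tests below check.

  ; Hypothetical example: @ctor promises to return its first argument.
  declare i8* @ctor(i8* returned, i32)

  define i8* @caller(i8* %obj) {
  entry:
    ; Since @ctor returns %obj, codegen may keep using the caller's copy of
    ; %obj in x0 after the call instead of copying the result back out of x0.
    %r = call i8* @ctor(i8* %obj, i32 1)
    ret i8* %r
  }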
@@ -266,13 +266,13 @@ protected:
   ///
   /// \return True if everything has succeeded, false otherwise.
   bool handleAssignments(MachineIRBuilder &MIRBuilder,
-                         SmallVectorImpl<ArgInfo> &Args,
-                         ValueHandler &Handler) const;
+                         SmallVectorImpl<ArgInfo> &Args, ValueHandler &Handler,
+                         Register ThisReturnReg = Register()) const;
   bool handleAssignments(CCState &CCState,
                          SmallVectorImpl<CCValAssign> &ArgLocs,
                          MachineIRBuilder &MIRBuilder,
-                         SmallVectorImpl<ArgInfo> &Args,
-                         ValueHandler &Handler) const;
+                         SmallVectorImpl<ArgInfo> &Args, ValueHandler &Handler,
+                         Register ThisReturnReg = Register()) const;
 
   /// Analyze passed or returned values from a call, supplied in \p ArgInfo,
   /// incorporating info about the passed values into \p CCState.
@@ -456,6 +456,10 @@ public:
                  ArrayRef<Register> ResRegs,
                  ArrayRef<ArrayRef<Register>> ArgRegs, Register SwiftErrorVReg,
                  std::function<unsigned()> GetCalleeReg) const;
+
+  /// For targets which support the "returned" parameter attribute, returns
+  /// true if the given type is a valid one to use with "returned".
+  virtual bool isTypeIsValidForThisReturn(EVT Ty) const { return false; }
 };
 
 } // end namespace llvm
@@ -119,7 +119,7 @@ namespace ISD {
     void setNest() { IsNest = 1; }
 
     bool isReturned() const { return IsReturned; }
-    void setReturned() { IsReturned = 1; }
+    void setReturned(bool V = true) { IsReturned = V; }
 
     bool isInConsecutiveRegs() const { return IsInConsecutiveRegs; }
     void setInConsecutiveRegs(bool Flag = true) { IsInConsecutiveRegs = Flag; }
@@ -170,6 +170,11 @@ void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
     Flags.setByValAlign(FrameAlign);
   }
   Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
+
+  // Don't try to use the returned attribute if the argument is marked as
+  // swiftself, since it won't be passed in x0.
+  if (Flags.isSwiftSelf())
+    Flags.setReturned(false);
 }
 
 template void
@@ -225,19 +230,22 @@ void CallLowering::unpackRegs(ArrayRef<Register> DstRegs, Register SrcReg,
 
 bool CallLowering::handleAssignments(MachineIRBuilder &MIRBuilder,
                                      SmallVectorImpl<ArgInfo> &Args,
-                                     ValueHandler &Handler) const {
+                                     ValueHandler &Handler,
+                                     Register ThisReturnReg) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const Function &F = MF.getFunction();
   SmallVector<CCValAssign, 16> ArgLocs;
   CCState CCInfo(F.getCallingConv(), F.isVarArg(), MF, ArgLocs, F.getContext());
-  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler);
+  return handleAssignments(CCInfo, ArgLocs, MIRBuilder, Args, Handler,
+                           ThisReturnReg);
 }
 
 bool CallLowering::handleAssignments(CCState &CCInfo,
                                      SmallVectorImpl<CCValAssign> &ArgLocs,
                                      MachineIRBuilder &MIRBuilder,
                                      SmallVectorImpl<ArgInfo> &Args,
-                                     ValueHandler &Handler) const {
+                                     ValueHandler &Handler,
+                                     Register ThisReturnReg) const {
   MachineFunction &MF = MIRBuilder.getMF();
   const Function &F = MF.getFunction();
   const DataLayout &DL = F.getParent()->getDataLayout();
@@ -330,6 +338,15 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
         if (PartIdx == NumParts - 1)
           Flags.setSplitEnd();
       }
+
+      // TODO: Also check if there is a valid extension that preserves the
+      // bits. However currently this call lowering doesn't support non-exact
+      // split parts, so that can't be tested.
+      if (OrigFlags.isReturned() &&
+          (NumParts * NewVT.getSizeInBits() != CurVT.getSizeInBits())) {
+        Flags.setReturned(false);
+      }
+
       Args[i].Regs.push_back(Unmerge.getReg(PartIdx));
       Args[i].Flags.push_back(Flags);
       if (Handler.assignArg(i, NewVT, NewVT, CCValAssign::Full,
@@ -394,6 +411,13 @@ bool CallLowering::handleAssignments(CCState &CCInfo,
 
     assert(VA.isRegLoc() && "custom loc should have been handled already");
 
+    if (i == 0 && ThisReturnReg.isValid() &&
+        Handler.isIncomingArgumentHandler() &&
+        isTypeIsValidForThisReturn(VAVT)) {
+      Handler.assignValueToReg(Args[i].Regs[i], ThisReturnReg, VA);
+      continue;
+    }
+
     // GlobalISel does not currently work for scalable vectors.
     if (OrigVT.getFixedSizeInBits() >= VAVT.getFixedSizeInBits() ||
         !Handler.isIncomingArgumentHandler()) {
@@ -152,6 +152,16 @@ struct CallReturnHandler : public IncomingArgHandler {
   MachineInstrBuilder MIB;
 };
 
+/// A special return arg handler for "returned" attribute arg calls.
+struct ReturnedArgCallReturnHandler : public CallReturnHandler {
+  ReturnedArgCallReturnHandler(MachineIRBuilder &MIRBuilder,
+                               MachineRegisterInfo &MRI,
+                               MachineInstrBuilder MIB, CCAssignFn *AssignFn)
+      : CallReturnHandler(MIRBuilder, MRI, MIB, AssignFn) {}
+
+  void markPhysRegUsed(MCRegister PhysReg) override {}
+};
+
 struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
   OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                      MachineInstrBuilder MIB, CCAssignFn *AssignFn,
@@ -785,6 +795,24 @@ static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
   return AArch64::TCRETURNri;
 }
 
+static const uint32_t *
+getMaskForArgs(SmallVectorImpl<AArch64CallLowering::ArgInfo> &OutArgs,
+               AArch64CallLowering::CallLoweringInfo &Info,
+               const AArch64RegisterInfo &TRI, MachineFunction &MF) {
+  const uint32_t *Mask;
+  if (!OutArgs.empty() && OutArgs[0].Flags[0].isReturned()) {
+    // For 'this' returns, use the X0-preserving mask if applicable
+    Mask = TRI.getThisReturnPreservedMask(MF, Info.CallConv);
+    if (!Mask) {
+      OutArgs[0].Flags[0].setReturned(false);
+      Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
+    }
+  } else {
+    Mask = TRI.getCallPreservedMask(MF, Info.CallConv);
+  }
+  return Mask;
+}
+
 bool AArch64CallLowering::lowerTailCall(
     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
     SmallVectorImpl<ArgInfo> &OutArgs) const {
@@ -878,6 +906,8 @@ bool AArch64CallLowering::lowerTailCall(
   if (!handleAssignments(MIRBuilder, OutArgs, Handler))
     return false;
 
+  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
+
   if (Info.IsVarArg && Info.IsMustTailCall) {
     // Now we know what's being passed to the function. Add uses to the call for
     // the forwarded registers that we *aren't* passing as parameters. This will
@@ -979,14 +1009,8 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   MIB.add(Info.Callee);
 
   // Tell the call which registers are clobbered.
-  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
-  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
-  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
-    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
-  MIB.addRegMask(Mask);
-
-  if (TRI->isAnyArgRegReserved(MF))
-    TRI->emitReservedArgRegCallError(MF);
+  const uint32_t *Mask;
+  const auto *TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
 
   // Do the actual argument marshalling.
   OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
@@ -994,6 +1018,15 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   if (!handleAssignments(MIRBuilder, OutArgs, Handler))
     return false;
 
+  Mask = getMaskForArgs(OutArgs, Info, *TRI, MF);
+
+  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
+    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
+  MIB.addRegMask(Mask);
+
+  if (TRI->isAnyArgRegReserved(MF))
+    TRI->emitReservedArgRegCallError(MF);
+
   // Now we can add the actual call instruction to the correct basic block.
   MIRBuilder.insertInstr(MIB);
 
@@ -1011,7 +1044,13 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
   if (!Info.OrigRet.Ty->isVoidTy()) {
     CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
     CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
-    if (!handleAssignments(MIRBuilder, InArgs, Handler))
+    bool UsingReturnedArg =
+        !OutArgs.empty() && OutArgs[0].Flags[0].isReturned();
+    ReturnedArgCallReturnHandler ReturnedArgHandler(MIRBuilder, MRI, MIB,
+                                                    RetAssignFn);
+    if (!handleAssignments(MIRBuilder, InArgs,
+                           UsingReturnedArg ? ReturnedArgHandler : Handler,
+                           UsingReturnedArg ? OutArgs[0].Regs[0] : Register()))
       return false;
   }
 
@@ -1033,3 +1072,7 @@ bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
 
   return true;
 }
+
+bool AArch64CallLowering::isTypeIsValidForThisReturn(EVT Ty) const {
+  return Ty.getSizeInBits() == 64;
+}
@@ -55,6 +55,8 @@ public:
 
   bool supportSwiftError() const override { return true; }
 
+  bool isTypeIsValidForThisReturn(EVT Ty) const override;
+
 private:
   using RegHandler = std::function<void(MachineIRBuilder &, Type *, unsigned,
                                         CCValAssign &)>;
@@ -788,8 +788,8 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
 ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
 ; CHECK: $x0 = COPY [[COPY]](p0)
 ; CHECK: $x1 = COPY [[LOAD]](p0)
-; CHECK: BL @wibble, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit-def $x0
-; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: BL @wibble, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1
+; CHECK: [[COPY3:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
 ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 ; CHECK: G_BR %bb.59
 ; CHECK: bb.57.bb62:
@@ -797,8 +797,8 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
 ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
 ; CHECK: $x0 = COPY [[COPY]](p0)
 ; CHECK: $x1 = COPY [[COPY2]](p0)
-; CHECK: BL @wibble, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit-def $x0
-; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: BL @wibble, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1
+; CHECK: [[COPY4:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
 ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 ; CHECK: G_BR %bb.59
 ; CHECK: bb.58.bb64:
@@ -812,8 +812,8 @@ define void @jt_multiple_jump_tables(%1* %arg, i32 %arg1, i32* %arg2) {
 ; CHECK: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
 ; CHECK: $x0 = COPY [[COPY]](p0)
 ; CHECK: $x1 = COPY [[COPY5]](p0)
-; CHECK: BL @wibble, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1, implicit-def $x0
-; CHECK: [[COPY6:%[0-9]+]]:_(p0) = COPY $x0
+; CHECK: BL @wibble, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $x1
+; CHECK: [[COPY6:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
 ; CHECK: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
 ; CHECK: bb.59.bb68:
 ; CHECK: RET_ReallyLR
@@ -1,4 +1,7 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=1 | FileCheck %s
+; RUN: llc < %s -mtriple=arm64-eabi -global-isel -global-isel-abort=1 -stop-after=irtranslator | FileCheck %s --check-prefix=GISEL-MIR
 
 %struct.A = type { i8 }
 %struct.B = type { i32 }
@@ -15,6 +18,19 @@ declare %struct.B* @B_ctor_base_nothisret(%struct.B*, i32)
 declare %struct.B* @B_ctor_complete_nothisret(%struct.B*, i32)
 
 define %struct.C* @C_ctor_base(%struct.C* returned %this, i32 %x) {
+; GISEL-MIR-LABEL: name: C_ctor_base
+; GISEL-MIR: bb.1.entry:
+; GISEL-MIR: liveins: $w1, $x0
+; GISEL-MIR: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: BL @A_ctor_base, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0
+; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: TCRETURNdi @B_ctor_base, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $w1
 entry:
 ; CHECK-LABEL: C_ctor_base:
 ; CHECK-NOT: mov {{x[0-9]+}}, x0
@@ -29,6 +45,24 @@ entry:
 }
 
 define %struct.C* @C_ctor_base_nothisret(%struct.C* %this, i32 %x) {
+; GISEL-MIR-LABEL: name: C_ctor_base_nothisret
+; GISEL-MIR: bb.1.entry:
+; GISEL-MIR: liveins: $w1, $x0
+; GISEL-MIR: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: BL @A_ctor_base_nothisret, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit-def $x0
+; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: BL @B_ctor_base_nothisret, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit-def $x0
+; GISEL-MIR: [[COPY3:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: RET_ReallyLR implicit $x0
 entry:
 ; CHECK-LABEL: C_ctor_base_nothisret:
 ; CHECK: mov [[SAVETHIS:x[0-9]+]], x0
@@ -43,6 +77,14 @@ entry:
 }
 
 define %struct.C* @C_ctor_complete(%struct.C* %this, i32 %x) {
+; GISEL-MIR-LABEL: name: C_ctor_complete
+; GISEL-MIR: bb.1.entry:
+; GISEL-MIR: liveins: $w1, $x0
+; GISEL-MIR: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: TCRETURNdi @C_ctor_base, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $w1
 entry:
 ; CHECK-LABEL: C_ctor_complete:
 ; CHECK: b {{_?C_ctor_base}}
@@ -51,6 +93,19 @@ entry:
 }
 
 define %struct.C* @C_ctor_complete_nothisret(%struct.C* %this, i32 %x) {
+; GISEL-MIR-LABEL: name: C_ctor_complete_nothisret
+; GISEL-MIR: bb.1.entry:
+; GISEL-MIR: liveins: $w1, $x0
+; GISEL-MIR: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: BL @C_ctor_base_nothisret, csr_aarch64_aapcs, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1, implicit-def $x0
+; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: RET_ReallyLR implicit $x0
 entry:
 ; CHECK-LABEL: C_ctor_complete_nothisret:
 ; CHECK-NOT: b {{_?C_ctor_base_nothisret}}
@@ -59,6 +114,20 @@ entry:
 }
 
 define %struct.D* @D_ctor_base(%struct.D* %this, i32 %x) {
+; GISEL-MIR-LABEL: name: D_ctor_base
+; GISEL-MIR: bb.1.entry:
+; GISEL-MIR: liveins: $w1, $x0
+; GISEL-MIR: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: BL @B_ctor_complete, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1
+; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: TCRETURNdi @B_ctor_complete, 0, csr_aarch64_aapcs, implicit $sp, implicit $x0, implicit $w1
 entry:
 ; CHECK-LABEL: D_ctor_base:
 ; CHECK-NOT: mov {{x[0-9]+}}, x0
@@ -72,6 +141,27 @@ entry:
 }
 
 define %struct.E* @E_ctor_base(%struct.E* %this, i32 %x) {
+; GISEL-MIR-LABEL: name: E_ctor_base
+; GISEL-MIR: bb.1.entry:
+; GISEL-MIR: liveins: $w1, $x0
+; GISEL-MIR: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
+; GISEL-MIR: [[COPY1:%[0-9]+]]:_(s32) = COPY $w1
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: BL @B_ctor_complete, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1
+; GISEL-MIR: [[COPY2:%[0-9]+]]:_(p0) = COPY [[COPY]](p0)
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 4
+; GISEL-MIR: [[PTR_ADD:%[0-9]+]]:_(p0) = G_PTR_ADD [[COPY]], [[C]](s64)
+; GISEL-MIR: ADJCALLSTACKDOWN 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[PTR_ADD]](p0)
+; GISEL-MIR: $w1 = COPY [[COPY1]](s32)
+; GISEL-MIR: BL @B_ctor_complete, csr_aarch64_aapcs_thisreturn, implicit-def $lr, implicit $sp, implicit $x0, implicit $w1
+; GISEL-MIR: [[COPY3:%[0-9]+]]:_(p0) = COPY [[PTR_ADD]](p0)
+; GISEL-MIR: ADJCALLSTACKUP 0, 0, implicit-def $sp, implicit $sp
+; GISEL-MIR: $x0 = COPY [[COPY]](p0)
+; GISEL-MIR: RET_ReallyLR implicit $x0
 entry:
 ; CHECK-LABEL: E_ctor_base:
 ; CHECK-NOT: b {{_?B_ctor_complete}}