Add __builtin_setjmp/__builtin_longjmp support to the X86 backend
- Besides its use in SjLj exception handling, __builtin_setjmp/__builtin_longjmp is also used as a light-weight replacement for setjmp/longjmp, e.g. to implement continuations, user-level threading, and the like. The support added in this patch ONLY addresses that usage and is NOT intended to support SjLj exception handling, since zero-cost DWARF exception handling is used by default on X86. llvm-svn: 165989
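As a quick illustration of that light-weight usage (a minimal sketch written for this review, not part of the patch; it assumes only the documented GCC/Clang builtin contract of a five-pointer buffer and a second __builtin_longjmp argument that must be 1):

// Illustrative only: the usage pattern the commit message describes.
#include <stdio.h>

static void *jb[5];                 /* five pointer-sized slots, as the builtins require */

static void worker(void) {
  puts("in worker; jumping back");
  __builtin_longjmp(jb, 1);         /* resumes at the matching __builtin_setjmp */
}

int main(void) {
  if (__builtin_setjmp(jb) == 0) {  /* direct call returns 0 */
    worker();                       /* never returns normally */
  } else {
    puts("resumed after __builtin_longjmp");  /* non-zero return after the jump */
  }
  return 0;
}

Clang lowers the two builtins to the llvm.eh.sjlj.setjmp/llvm.eh.sjlj.longjmp intrinsics, which the new test at the bottom of this patch exercises directly.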
parent bd847cc562 · commit 97bf363a9e
@@ -1302,7 +1302,9 @@ bool X86DAGToDAGISel::SelectAddr(SDNode *Parent, SDValue N, SDValue &Base,
       // that are not a MemSDNode, and thus don't have proper addrspace info.
       Parent->getOpcode() != ISD::INTRINSIC_W_CHAIN && // unaligned loads, fixme
       Parent->getOpcode() != ISD::INTRINSIC_VOID && // nontemporal stores
-      Parent->getOpcode() != X86ISD::TLSCALL) { // Fixme
+      Parent->getOpcode() != X86ISD::TLSCALL && // Fixme
+      Parent->getOpcode() != X86ISD::EH_SJLJ_SETJMP && // setjmp
+      Parent->getOpcode() != X86ISD::EH_SJLJ_LONGJMP) { // longjmp
     unsigned AddrSpace =
       cast<MemSDNode>(Parent)->getPointerInfo().getAddrSpace();
     // AddrSpace 256 -> GS, 257 -> FS.

@@ -457,6 +457,14 @@ X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
    setOperationAction(ISD::SETCC           , MVT::i64  , Custom);
  }
  setOperationAction(ISD::EH_RETURN         , MVT::Other, Custom);
  // NOTE: EH_SJLJ_SETJMP/_LONGJMP support here is NOT intended for SjLj
  // exception handling; it is a light-weight setjmp/longjmp replacement used
  // to support continuations, user-level threading, etc.  As a result, no
  // other SjLj exception interfaces are implemented, so please don't build
  // your own exception handling on top of them.
  // LLVM/Clang supports zero-cost DWARF exception handling.
  setOperationAction(ISD::EH_SJLJ_SETJMP, MVT::i32, Custom);
  setOperationAction(ISD::EH_SJLJ_LONGJMP, MVT::Other, Custom);

  // Darwin ABI issue.
  setOperationAction(ISD::ConstantPool      , MVT::i32  , Custom);

@@ -10351,6 +10359,21 @@ SDValue X86TargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
                     Chain, DAG.getRegister(StoreAddrReg, getPointerTy()));
}

SDValue X86TargetLowering::lowerEH_SJLJ_SETJMP(SDValue Op,
                                               SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  return DAG.getNode(X86ISD::EH_SJLJ_SETJMP, DL,
                     DAG.getVTList(MVT::i32, MVT::Other),
                     Op.getOperand(0), Op.getOperand(1));
}

SDValue X86TargetLowering::lowerEH_SJLJ_LONGJMP(SDValue Op,
                                                SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  return DAG.getNode(X86ISD::EH_SJLJ_LONGJMP, DL, MVT::Other,
                     Op.getOperand(0), Op.getOperand(1));
}

static SDValue LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) {
  return Op.getOperand(0);
}

@@ -11375,6 +11398,8 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
    return LowerFRAME_TO_ARGS_OFFSET(Op, DAG);
  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
  case ISD::EH_SJLJ_SETJMP:     return lowerEH_SJLJ_SETJMP(Op, DAG);
  case ISD::EH_SJLJ_LONGJMP:    return lowerEH_SJLJ_LONGJMP(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::FLT_ROUNDS_:        return LowerFLT_ROUNDS_(Op, DAG);

@@ -11667,6 +11692,8 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
  case X86ISD::TLSADDR:            return "X86ISD::TLSADDR";
  case X86ISD::TLSBASEADDR:        return "X86ISD::TLSBASEADDR";
  case X86ISD::TLSCALL:            return "X86ISD::TLSCALL";
  case X86ISD::EH_SJLJ_SETJMP:     return "X86ISD::EH_SJLJ_SETJMP";
  case X86ISD::EH_SJLJ_LONGJMP:    return "X86ISD::EH_SJLJ_LONGJMP";
  case X86ISD::EH_RETURN:          return "X86ISD::EH_RETURN";
  case X86ISD::TC_RETURN:          return "X86ISD::TC_RETURN";
  case X86ISD::FNSTCW16m:          return "X86ISD::FNSTCW16m";

@@ -13212,6 +13239,173 @@ X86TargetLowering::EmitLoweredTLSCall(MachineInstr *MI,
  return BB;
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjSetJmp(MachineInstr *MI,
                                    MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  const BasicBlock *BB = MBB->getBasicBlock();
  MachineFunction::iterator I = MBB;
  ++I;

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  unsigned DstReg;
  unsigned MemOpndSlot = 0;

  unsigned CurOp = 0;

  DstReg = MI->getOperand(CurOp++).getReg();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
  assert(RC->hasType(MVT::i32) && "Invalid destination!");
  unsigned mainDstReg = MRI.createVirtualRegister(RC);
  unsigned restoreDstReg = MRI.createVirtualRegister(RC);

  MemOpndSlot = CurOp;

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  // For v = setjmp(buf), we generate
  //
  // thisMBB:
  //  buf[Label_Offset] = ljMBB
  //  SjLjSetup restoreMBB
  //
  // mainMBB:
  //  v_main = 0
  //
  // sinkMBB:
  //  v = phi(main, restore)
  //
  // restoreMBB:
  //  v_restore = 1

  MachineBasicBlock *thisMBB = MBB;
  MachineBasicBlock *mainMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *sinkMBB = MF->CreateMachineBasicBlock(BB);
  MachineBasicBlock *restoreMBB = MF->CreateMachineBasicBlock(BB);
  MF->insert(I, mainMBB);
  MF->insert(I, sinkMBB);
  MF->push_back(restoreMBB);

  MachineInstrBuilder MIB;

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), MBB,
                  llvm::next(MachineBasicBlock::iterator(MI)), MBB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(MBB);

  // thisMBB:
  unsigned PtrImmStoreOpc = (PVT == MVT::i64) ? X86::MOV64mi32 : X86::MOV32mi;
  const int64_t Label_Offset = 1 * PVT.getStoreSize();

  // Store IP
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(PtrImmStoreOpc));
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(MemOpndSlot + i), Label_Offset);
    else
      MIB.addOperand(MI->getOperand(MemOpndSlot + i));
  }
  MIB.addMBB(restoreMBB);
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Setup
  MIB = BuildMI(*thisMBB, MI, DL, TII->get(X86::EH_SjLj_Setup))
          .addMBB(restoreMBB);
  MIB.addRegMask(RegInfo->getNoPreservedMask());
  thisMBB->addSuccessor(mainMBB);
  thisMBB->addSuccessor(restoreMBB);

  // mainMBB:
  //  EAX = 0
  BuildMI(mainMBB, DL, TII->get(X86::MOV32r0), mainDstReg);
  mainMBB->addSuccessor(sinkMBB);

  // sinkMBB:
  BuildMI(*sinkMBB, sinkMBB->begin(), DL,
          TII->get(X86::PHI), DstReg)
    .addReg(mainDstReg).addMBB(mainMBB)
    .addReg(restoreDstReg).addMBB(restoreMBB);

  // restoreMBB:
  BuildMI(restoreMBB, DL, TII->get(X86::MOV32ri), restoreDstReg).addImm(1);
  BuildMI(restoreMBB, DL, TII->get(X86::JMP_4)).addMBB(sinkMBB);
  restoreMBB->addSuccessor(sinkMBB);

  MI->eraseFromParent();
  return sinkMBB;
}

MachineBasicBlock *
X86TargetLowering::emitEHSjLjLongJmp(MachineInstr *MI,
                                     MachineBasicBlock *MBB) const {
  DebugLoc DL = MI->getDebugLoc();
  const TargetInstrInfo *TII = getTargetMachine().getInstrInfo();

  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  // Memory Reference
  MachineInstr::mmo_iterator MMOBegin = MI->memoperands_begin();
  MachineInstr::mmo_iterator MMOEnd = MI->memoperands_end();

  MVT PVT = getPointerTy();
  assert((PVT == MVT::i64 || PVT == MVT::i32) &&
         "Invalid Pointer Size!");

  const TargetRegisterClass *RC =
    (PVT == MVT::i64) ? &X86::GR64RegClass : &X86::GR32RegClass;
  unsigned Tmp = MRI.createVirtualRegister(RC);
  // Since FP is only updated here but NOT referenced, it's treated as GPR.
  unsigned FP = (PVT == MVT::i64) ? X86::RBP : X86::EBP;
  unsigned SP = RegInfo->getStackRegister();

  MachineInstrBuilder MIB;

  const int64_t Label_Offset = 1 * PVT.getStoreSize();
  const int64_t SP_Offset = 2 * PVT.getStoreSize();

  unsigned PtrLoadOpc = (PVT == MVT::i64) ? X86::MOV64rm : X86::MOV32rm;
  unsigned IJmpOpc = (PVT == MVT::i64) ? X86::JMP64r : X86::JMP32r;

  // Reload FP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), FP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i)
    MIB.addOperand(MI->getOperand(i));
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload IP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), Tmp);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), Label_Offset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Reload SP
  MIB = BuildMI(*MBB, MI, DL, TII->get(PtrLoadOpc), SP);
  for (unsigned i = 0; i < X86::AddrNumOperands; ++i) {
    if (i == X86::AddrDisp)
      MIB.addDisp(MI->getOperand(i), SP_Offset);
    else
      MIB.addOperand(MI->getOperand(i));
  }
  MIB.setMemRefs(MMOBegin, MMOEnd);
  // Jump
  BuildMI(*MBB, MI, DL, TII->get(IJmpOpc)).addReg(Tmp);

  MI->eraseFromParent();
  return MBB;
}

MachineBasicBlock *
X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                               MachineBasicBlock *BB) const {

@@ -13427,6 +13621,14 @@ X86TargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,

  case X86::VAARG_64:
    return EmitVAARG64WithCustomInserter(MI, BB);

  case X86::EH_SjLj_SetJmp32:
  case X86::EH_SjLj_SetJmp64:
    return emitEHSjLjSetJmp(MI, BB);

  case X86::EH_SjLj_LongJmp32:
  case X86::EH_SjLj_LongJmp64:
    return emitEHSjLjLongJmp(MI, BB);
  }
}

@@ -217,6 +217,12 @@ namespace llvm {
      // EH_RETURN - Exception Handling helpers.
      EH_RETURN,

      // EH_SJLJ_SETJMP - SjLj exception handling setjmp.
      EH_SJLJ_SETJMP,

      // EH_SJLJ_LONGJMP - SjLj exception handling longjmp.
      EH_SJLJ_LONGJMP,

      /// TC_RETURN - Tail call return.
      /// operand #0 chain
      /// operand #1 callee (register or absolute)

@@ -810,6 +816,8 @@ namespace llvm {
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAME_TO_ARGS_OFFSET(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_SETJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue lowerEH_SJLJ_LONGJMP(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerShift(SDValue Op, SelectionDAG &DAG) const;

@@ -906,6 +914,12 @@ namespace llvm {
    MachineBasicBlock *emitLoweredTLSAddr(MachineInstr *MI,
                                          MachineBasicBlock *BB) const;

    MachineBasicBlock *emitEHSjLjSetJmp(MachineInstr *MI,
                                        MachineBasicBlock *MBB) const;

    MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
                                         MachineBasicBlock *MBB) const;

    /// Emit nodes that will be selected as "test Op0,Op0", or something
    /// equivalent, for use with the given x86 condition code.
    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;

@@ -165,6 +165,33 @@ def EH_RETURN64 : I<0xC3, RawFrm, (outs), (ins GR64:$addr),

}

let hasSideEffects = 1, isBarrier = 1, isCodeGenOnly = 1,
    usesCustomInserter = 1 in {
  def EH_SjLj_SetJmp32  : I<0, Pseudo, (outs GR32:$dst), (ins i32mem:$buf),
                            "#EH_SJLJ_SETJMP32",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In32BitMode]>;
  def EH_SjLj_SetJmp64  : I<0, Pseudo, (outs GR32:$dst), (ins i64mem:$buf),
                            "#EH_SJLJ_SETJMP64",
                            [(set GR32:$dst, (X86eh_sjlj_setjmp addr:$buf))]>,
                          Requires<[In64BitMode]>;
  let isTerminator = 1 in {
    def EH_SjLj_LongJmp32 : I<0, Pseudo, (outs), (ins i32mem:$buf),
                              "#EH_SJLJ_LONGJMP32",
                              [(X86eh_sjlj_longjmp addr:$buf)]>,
                            Requires<[In32BitMode]>;
    def EH_SjLj_LongJmp64 : I<0, Pseudo, (outs), (ins i64mem:$buf),
                              "#EH_SJLJ_LONGJMP64",
                              [(X86eh_sjlj_longjmp addr:$buf)]>,
                            Requires<[In64BitMode]>;
  }
}

let isBranch = 1, isTerminator = 1, isCodeGenOnly = 1 in {
  def EH_SjLj_Setup : I<0, Pseudo, (outs), (ins brtarget:$dst),
                        "#EH_SjLj_Setup\t$dst", []>;
}

//===----------------------------------------------------------------------===//
// Pseudo instructions used by segmented stacks.
//

@@ -216,6 +216,14 @@ def X86tlsbaseaddr : SDNode<"X86ISD::TLSBASEADDR", SDT_X86TLSBASEADDR,
def X86ehret : SDNode<"X86ISD::EH_RETURN", SDT_X86EHRET,
                      [SDNPHasChain]>;

def X86eh_sjlj_setjmp  : SDNode<"X86ISD::EH_SJLJ_SETJMP",
                                SDTypeProfile<1, 1, [SDTCisInt<0>,
                                                     SDTCisPtrTy<1>]>,
                                [SDNPHasChain, SDNPSideEffect]>;
def X86eh_sjlj_longjmp : SDNode<"X86ISD::EH_SJLJ_LONGJMP",
                                SDTypeProfile<0, 1, [SDTCisPtrTy<0>]>,
                                [SDNPHasChain, SDNPSideEffect]>;

def X86tcret : SDNode<"X86ISD::TC_RETURN", SDT_X86TCRET,
                      [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;

@@ -261,6 +261,11 @@ X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

@@ -100,6 +100,7 @@ public:
  /// callee-save registers on this target.
  const uint16_t *getCalleeSavedRegs(const MachineFunction* MF = 0) const;
  const uint32_t *getCallPreservedMask(CallingConv::ID) const;
  const uint32_t *getNoPreservedMask() const;

  /// getReservedRegs - Returns a bitset indexed by physical register number
  /// indicating if a register is a special register that has particular uses and

@@ -0,0 +1,46 @@
; RUN: llc < %s -mtriple=i386-pc-linux -mcpu=corei7 -relocation-model=static | FileCheck --check-prefix=X86 %s
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 | FileCheck --check-prefix=X64 %s

@buf = internal global [5 x i8*] zeroinitializer

declare i8* @llvm.frameaddress(i32) nounwind readnone

declare i8* @llvm.stacksave() nounwind

declare i32 @llvm.eh.sjlj.setjmp(i8*) nounwind

declare void @llvm.eh.sjlj.longjmp(i8*) nounwind

define i32 @sj0() nounwind {
  %fp = tail call i8* @llvm.frameaddress(i32 0)
  store i8* %fp, i8** getelementptr inbounds ([5 x i8*]* @buf, i64 0, i64 0), align 16
  %sp = tail call i8* @llvm.stacksave()
  store i8* %sp, i8** getelementptr inbounds ([5 x i8*]* @buf, i64 0, i64 2), align 16
  %r = tail call i32 @llvm.eh.sjlj.setjmp(i8* bitcast ([5 x i8*]* @buf to i8*))
  ret i32 %r
; X86: sj0
; x86: movl %ebp, buf
; x86: movl ${{.*LBB.*}}, buf+4
; X86: movl %esp, buf+8
; X86: ret
; X64: sj0
; x64: movq %rbp, buf(%rip)
; x64: movq ${{.*LBB.*}}, buf+8(%rip)
; X64: movq %rsp, buf+16(%rip)
; X64: ret
}

define void @lj0() nounwind {
  tail call void @llvm.eh.sjlj.longjmp(i8* bitcast ([5 x i8*]* @buf to i8*))
  unreachable
; X86: lj0
; X86: movl buf, %ebp
; X86: movl buf+4, %[[REG32:.*]]
; X86: movl buf+8, %esp
; X86: jmpl *%[[REG32]]
; X64: lj0
; X64: movq buf(%rip), %rbp
; X64: movq buf+8(%rip), %[[REG64:.*]]
; X64: movq buf+16(%rip), %rsp
; X64: jmpq *%[[REG64]]
}
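The buf offsets checked above (buf, buf+4/+8, buf+8/+16) follow from the buffer layout this lowering assumes; a short annotated sketch (added for this review, not part of the patch):

// Five pointer-sized slots, as used by __builtin_setjmp/__builtin_longjmp on X86;
// slots 3 and 4 are unused here.  The front end stores FP and SP around the
// llvm.eh.sjlj.setjmp call (see @sj0), and the emitters in this patch store the
// resume address and reload all three slots for longjmp.
void *buf[5];
// buf[0] : frame pointer  (llvm.frameaddress store; reloaded into %ebp/%rbp)
// buf[1] : resume address (Label_Offset = 1 * pointer size; MOVmi in emitEHSjLjSetJmp)
// buf[2] : stack pointer  (SP_Offset = 2 * pointer size; llvm.stacksave store)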