//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
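
// Illustration: being a standard llvm::cl option, the flag above can be
// toggled from the llc command line, e.g. "llc -x86-use-base-pointer=false
// foo.ll" (the example invocation is an assumption, not taken from this file).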

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::initLLVMToSEHAndCVRegMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT address in EBX before calls through the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}
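
// Illustration: the SEH register numbers are simply the hardware encodings
// returned by getEncodingValue(), e.g. RAX -> 0, RCX -> 1, RDX -> 2,
// RBX -> 3 (a sketch of the expected mapping, assuming the usual x86-64
// register encodings).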

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
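
// Illustration: in 32-bit mode only EAX/ECX/EDX/EBX have an addressable low
// 8-bit sub-register, so a query such as
//   getSubClassWithSubReg(&X86::GR32RegClass, X86::sub_8bit)
// is expected to narrow the class to the ABCD registers there, while in
// 64-bit mode GR32 keeps the index as-is (a sketch, assuming the
// TableGen-generated sub-class lattice).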

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
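
// Illustration: the getSize() guard above keeps inflation from changing spill
// slots. For example, a scalar float class like FR32 (4-byte spill) has
// vector super-classes with 16-byte spills, so it is not widened to VR128
// here (a sketch of the intent, assuming the usual register-class sizes).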

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}
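
// Illustration: the Kind values are expected to correspond to the pointer
// register-class operands used by the instruction definitions (ptr_rc,
// ptr_rc_nosp, ptr_rc_norex, ptr_rc_norex_nosp, ptr_rc_tailcall in the .td
// files); this mapping is an assumption based on the case comments above.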

const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function *F = MF.getFunction();
  if (IsWin64 || (F && F->getCallingConv() == CallingConv::X86_64_Win64))
    return &X86::GR64_TCW64RegClass;
  else if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
  if (hasHipeCC)
    return &X86::GR32RegClass;
  return &X86::GR32_TCRegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}
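
// Illustration: CCR is the condition-code (EFLAGS) register class. EFLAGS
// cannot be copied directly like a GPR, so cross-class copies are routed
// through a general-purpose register of the native width (an interpretation
// of the code above, offered as a reading rather than documentation).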

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");
  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->getMMI().callsEHReturn();

  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
             CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_SaveList;
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      else
        return CSR_64_AllRegs_SaveList;
    } else {
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      else
        return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}
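
// Illustration: the CSR_*_SaveList tables returned above are TableGen
// generated into X86GenRegisterInfo.inc from CalleeSavedRegs definitions,
// conventionally kept in X86CallingConv.td (the file location is an
// assumption; the lists themselves are not defined here).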

const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
    return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
  return nullptr;
}

const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_RegMask;
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      else
        return CSR_64_AllRegs_RegMask;
    } else {
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      else
        return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}
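
// Illustration: a register mask packs one bit per register, with a set bit
// meaning "preserved across the call". A caller can test a register roughly
// like this (sketch):
//   bool Preserved = Mask[Reg / 32] & (1u << (Reg % 32));
// which mirrors the indexing used in adjustStackMapLiveOutMask() below.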

const uint32_t *
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately, EFLAGS shows up as live-out after branch folding. Add an
  // assert to track this and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");

  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static bool CantUseSP(const MachineFrameInfo *MFI) {
  return MFI->hasVarSizedObjects() || MFI->hasOpaqueSPAdjustment();
}

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}
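
// Illustration: a function that both over-aligns a local (forcing stack
// realignment, after which the frame pointer cannot address locals) and
// calls alloca (making stack-pointer adjustments dynamic) can use neither
// SP nor FP for everything, so this predicate requests the base pointer
// chosen in the constructor. Sketch of a trigger, as an assumption:
//   void f(int n) { alignas(32) int buf[8]; char *p = (char *)alloca(n); }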

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm ||
                    Opc == X86::TCRETURNmi || Opc == X86::TCRETURNmi64;

  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location.
  // This matches the behavior of llvm.frameaddress.
  unsigned IgnoredFrameReg;
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    int Offset;
    Offset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
    FI.ChangeToImmediate(Offset);
    return;
  }

  // For LEA64_32r, when BasePtr is 32-bit (X32) we can use the full-size
  // 64-bit register as the source operand: the semantics are the same and
  // the destination is 32-bit. It saves one byte per lea in code since the
  // 0x67 prefix is avoided.
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    BasePtr = getX86SubSuperRegister(BasePtr, 64);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (e.g. EBP). Add an offset to the
  // offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from
  // the X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}
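
// Illustration of the rewrite above (MIR-style sketch; the offset values are
// assumed for the example):
//   %eax = MOV32rm %stack.0, 1, %noreg, 0, %noreg
// becomes, in the simple frame-pointer case,
//   %eax = MOV32rm %ebp, 1, %noreg, -8, %noreg
// i.e. the frame-index operand turns into the chosen base register and the
// displacement operand (FIOperandNum + 3) absorbs the frame object offset.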

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}

unsigned llvm::get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}
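
// Illustration: get512BitSuperRegister(X86::XMM3) and
// get512BitSuperRegister(X86::YMM3) both yield X86::ZMM3, since the XMM/YMM
// registers are the low 128/256 bits of the corresponding ZMM register.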