//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
|
2012-03-18 02:46:09 +08:00
|
|
|
#include "X86.h"
|
2002-11-23 06:43:47 +08:00
|
|
|
#include "X86InstrBuilder.h"
|
2006-06-07 07:30:24 +08:00
|
|
|
#include "X86MachineFunctionInfo.h"
|
2006-09-08 14:48:29 +08:00
|
|
|
#include "X86Subtarget.h"
|
2006-06-07 07:30:24 +08:00
|
|
|
#include "X86TargetMachine.h"
|
2002-11-21 02:59:43 +08:00
|
|
|
#include "llvm/Constants.h"
|
2006-06-03 06:38:37 +08:00
|
|
|
#include "llvm/Function.h"
|
2006-09-08 14:48:29 +08:00
|
|
|
#include "llvm/Type.h"
|
2003-07-29 13:14:16 +08:00
|
|
|
#include "llvm/CodeGen/ValueTypes.h"
|
2002-11-21 02:59:43 +08:00
|
|
|
#include "llvm/CodeGen/MachineInstrBuilder.h"
|
2002-12-16 04:06:35 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunction.h"
|
2008-07-02 02:15:35 +08:00
|
|
|
#include "llvm/CodeGen/MachineFunctionPass.h"
|
2002-12-29 05:08:28 +08:00
|
|
|
#include "llvm/CodeGen/MachineFrameInfo.h"
|
2007-12-31 12:13:23 +08:00
|
|
|
#include "llvm/CodeGen/MachineModuleInfo.h"
|
|
|
|
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
2009-08-23 04:48:53 +08:00
|
|
|
#include "llvm/MC/MCAsmInfo.h"
|
2011-01-10 20:39:04 +08:00
|
|
|
#include "llvm/Target/TargetFrameLowering.h"
|
2006-12-07 09:21:59 +08:00
|
|
|
#include "llvm/Target/TargetInstrInfo.h"
|
2004-06-22 05:10:24 +08:00
|
|
|
#include "llvm/Target/TargetMachine.h"
|
2004-07-11 12:17:10 +08:00
|
|
|
#include "llvm/Target/TargetOptions.h"
|
2007-02-20 05:49:54 +08:00
|
|
|
#include "llvm/ADT/BitVector.h"
|
2004-09-02 06:55:40 +08:00
|
|
|
#include "llvm/ADT/STLExtras.h"
|
2009-07-12 04:10:48 +08:00
|
|
|
#include "llvm/Support/ErrorHandling.h"
|
2010-08-06 07:57:43 +08:00
|
|
|
#include "llvm/Support/CommandLine.h"
|
2011-06-28 02:32:37 +08:00
|
|
|
|
|
|
|
#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);

cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

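// Both options above are ordinary cl::opt flags, so they can be toggled on the
// llc command line (e.g. "-force-align-stack" or "-x86-use-base-pointer=false");
// drivers such as clang typically need them forwarded via -mllvm.
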
X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP,
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true)),
                       TM(tm), TII(tii) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be held in EBX across calls through the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

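// For the mapping above: RBX encodes as compact unwind register number 1 and
// RBP as 6, while a register outside the table (e.g. R8) yields -1 and thus
// cannot be represented in the compact unwind encoding.
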
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

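// For instance, a constrained class such as GR32_ABCD can be inflated to GR32
// because both spill 4 bytes; a super-class whose spill size differs from RC's
// is skipped, which is why the float and vector classes need the size check.
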
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

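// For example, in a function that keeps a frame pointer the GR32 limit above
// is 4 - 1 = 3 and the GR64 limit is 12 - 1 = 11; classes not listed simply
// return 0 here.
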
const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  if (ghcCall)
    return CSR_NoRegs_SaveList;
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (callsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (callsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

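// The CSR_*_SaveList arrays used above and the CSR_*_RegMask tables used below
// are emitted by TableGen from the CalleeSavedRegs definitions in
// X86CallingConv.td, so the actual register contents live there, not here.
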
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  if (CC == CallingConv::GHC)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    Reserved.set(getBaseRegister());
    for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are legacy 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

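// Registers marked reserved above are never assigned by the register
// allocator; they may still be referenced directly by target-emitted code,
// e.g. prologue/epilogue uses of RSP and RBP.
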
//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  return false;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getFnAttributes().hasAttribute(Attributes::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

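// For example, getSUBriOpcode(true, 8) selects SUB64ri8 because 8 fits in a
// signed 8-bit immediate, while getSUBriOpcode(true, 128) selects SUB64ri32
// since 128 lies outside the [-128, 127] range of an 8-bit immediate.
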
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly.  To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
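    // Worked example: with Amount = 20 and StackAlign = 16, this rounds the
    // outgoing-argument area up to (20 + 15) / 16 * 16 = 32 bytes.
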
    MachineInstr *New = 0;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back.  We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a memory reference.  Replace the FrameIndex with the
  // base register chosen above, and fold the frame offset into the
  // displacement below.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);
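
  // The displacement is operand i+3 because an x86 memory reference is laid
  // out as five operands: base register, scale, index register, displacement,
  // and segment register.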
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(i + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64, High);
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    // In 64-bit mode, if we've requested a "high" register for the Q or r
    // constraints, return one of these registers; otherwise fall through to
    // the full 64-bit register below.
    if (High) {
      switch (Reg) {
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      // Fallthrough.
      }
    }
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
}

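// Examples of the mapping above: getX86SubSuperRegister(X86::EAX, MVT::i16)
// returns X86::AX, and getX86SubSuperRegister(X86::AX, MVT::i8, /*High=*/true)
// returns X86::AH.
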
namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const TargetFrameLowering *TFI = TM->getFrameLowering();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = TFI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setForceFramePointer(true);
          return true;
        }
      }
      // Nothing to do
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }
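
// The pass above is created through createX86MaxStackAlignmentHeuristicPass();
// it is expected to be scheduled by the X86 target before register allocation
// (see X86TargetMachine for the exact pipeline position).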