Reimplement rip-relative addressing in the X86-64 backend. The new

implementation primarily differs from the former in that the asmprinter
doesn't make a zillion decisions about whether or not something will be
RIP relative or not.  Instead, those decisions are made by isel lowering
and propagated through to the asm printer.  To achieve this, we:

1. Represent RIP relative addresses by setting the base of the X86 addr
   mode to X86::RIP.
2. When ISel Lowering decides that it is safe to use RIP, it lowers to
   X86ISD::WrapperRIP.  When it is unsafe to use RIP, it lowers to
   X86ISD::Wrapper as before.
3. This removes isRIPRel from X86ISelAddressMode, representing it with
   a basereg of RIP instead.
4. The addressing mode matching logic in isel is greatly simplified.
5. The asmprinter is greatly simplified, notably the "NotRIPRel" predicate
   passed through various printoperand routines is gone now.
6. The various symbol printing routines in asmprinter now no longer infer
   when to emit (%rip), they just print the symbol.

I think this is a big improvement over the previous situation.  It does have
two small caveats though: 1. I implemented a horrible "no-rip" modifier for
the inline asm "P" constraint modifier.  This is a short term hack, there is
a much better, but more involved, solution.  2. I had to xfail an 
-aggressive-remat testcase because it isn't handling the use of RIP in the
constant-pool reading instruction.  This specific test is easy to fix without
-aggressive-remat, which I intend to do next.

llvm-svn: 74372
This commit is contained in:
Chris Lattner 2009-06-27 04:16:01 +00:00
parent df92e147c9
commit fea81da433
11 changed files with 251 additions and 166 deletions

View File

@ -425,7 +425,7 @@ void X86ATTAsmPrinter::print_pcrel_imm(const MachineInstr *MI, unsigned OpNo) {
} }
void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo, void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
const char *Modifier, bool NotRIPRel) { const char *Modifier) {
const MachineOperand &MO = MI->getOperand(OpNo); const MachineOperand &MO = MI->getOperand(OpNo);
switch (MO.getType()) { switch (MO.getType()) {
case MachineOperand::MO_Register: { case MachineOperand::MO_Register: {
@ -476,8 +476,6 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
break; break;
} }
if (isMemOp && Subtarget->isPICStyleRIPRel() && !NotRIPRel)
O << "(%rip)";
return; return;
} }
case MachineOperand::MO_ConstantPoolIndex: { case MachineOperand::MO_ConstantPoolIndex: {
@ -509,8 +507,6 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
break; break;
} }
if (isMemOp && Subtarget->isPICStyleRIPRel() && !NotRIPRel)
O << "(%rip)";
return; return;
} }
case MachineOperand::MO_GlobalAddress: { case MachineOperand::MO_GlobalAddress: {
@ -580,8 +576,6 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
if (needCloseParen) if (needCloseParen)
O << ')'; O << ')';
bool isRIPRelative = false;
switch (MO.getTargetFlags()) { switch (MO.getTargetFlags()) {
default: default:
assert(0 && "Unknown target flag on GV operand"); assert(0 && "Unknown target flag on GV operand");
@ -595,8 +589,6 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
break; break;
case X86II::MO_GOTTPOFF: case X86II::MO_GOTTPOFF:
O << "@GOTTPOFF"; O << "@GOTTPOFF";
assert(!NotRIPRel);
isRIPRelative = true;
break; break;
case X86II::MO_INDNTPOFF: case X86II::MO_INDNTPOFF:
O << "@INDNTPOFF"; O << "@INDNTPOFF";
@ -607,6 +599,9 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
case X86II::MO_NTPOFF: case X86II::MO_NTPOFF:
O << "@NTPOFF"; O << "@NTPOFF";
break; break;
case X86II::MO_GOTPCREL:
O << "@GOTPCREL";
break;
} }
if (isThreadLocal) { if (isThreadLocal) {
@ -617,23 +612,14 @@ void X86ATTAsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNo,
O << "@GOT"; O << "@GOT";
else else
O << "@GOTOFF"; O << "@GOTOFF";
} else if (Subtarget->isPICStyleRIPRel() && } else if (Subtarget->isPICStyleRIPRel()) {
!NotRIPRel) {
if (TM.getRelocationModel() != Reloc::Static) { if (TM.getRelocationModel() != Reloc::Static) {
if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) if (Subtarget->GVRequiresExtraLoad(GV, TM, false))
O << "@GOTPCREL"; O << "@GOTPCREL";
} }
isRIPRelative = true;
} }
} }
// Use rip when possible to reduce code size, except when
// index or base register are also part of the address. e.g.
// foo(%rip)(%rcx,%rax,4) is not legal.
if (isRIPRelative)
O << "(%rip)";
return; return;
} }
case MachineOperand::MO_ExternalSymbol: { case MachineOperand::MO_ExternalSymbol: {
@ -679,25 +665,24 @@ void X86ATTAsmPrinter::printSSECC(const MachineInstr *MI, unsigned Op) {
} }
void X86ATTAsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op, void X86ATTAsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
const char *Modifier, const char *Modifier) {
bool NotRIPRel) {
MachineOperand BaseReg = MI->getOperand(Op); MachineOperand BaseReg = MI->getOperand(Op);
MachineOperand IndexReg = MI->getOperand(Op+2); MachineOperand IndexReg = MI->getOperand(Op+2);
const MachineOperand &DispSpec = MI->getOperand(Op+3); const MachineOperand &DispSpec = MI->getOperand(Op+3);
NotRIPRel |= IndexReg.getReg() || BaseReg.getReg();
if (DispSpec.isGlobal() || if (DispSpec.isGlobal() ||
DispSpec.isCPI() || DispSpec.isCPI() ||
DispSpec.isJTI() || DispSpec.isJTI() ||
DispSpec.isSymbol()) { DispSpec.isSymbol()) {
printOperand(MI, Op+3, "mem", NotRIPRel); printOperand(MI, Op+3, "mem");
} else { } else {
int DispVal = DispSpec.getImm(); int DispVal = DispSpec.getImm();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
O << DispVal; O << DispVal;
} }
if (IndexReg.getReg() || BaseReg.getReg()) { if ((IndexReg.getReg() || BaseReg.getReg()) &&
(Modifier == 0 || strcmp(Modifier, "no-rip"))) {
unsigned ScaleVal = MI->getOperand(Op+1).getImm(); unsigned ScaleVal = MI->getOperand(Op+1).getImm();
unsigned BaseRegOperand = 0, IndexRegOperand = 2; unsigned BaseRegOperand = 0, IndexRegOperand = 2;
@ -725,14 +710,14 @@ void X86ATTAsmPrinter::printLeaMemReference(const MachineInstr *MI, unsigned Op,
} }
void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op, void X86ATTAsmPrinter::printMemReference(const MachineInstr *MI, unsigned Op,
const char *Modifier, bool NotRIPRel){ const char *Modifier) {
assert(isMem(MI, Op) && "Invalid memory reference!"); assert(isMem(MI, Op) && "Invalid memory reference!");
MachineOperand Segment = MI->getOperand(Op+4); MachineOperand Segment = MI->getOperand(Op+4);
if (Segment.getReg()) { if (Segment.getReg()) {
printOperand(MI, Op+4, Modifier); printOperand(MI, Op+4, Modifier);
O << ':'; O << ':';
} }
printLeaMemReference(MI, Op, Modifier, NotRIPRel); printLeaMemReference(MI, Op, Modifier);
} }
void X86ATTAsmPrinter::printPICJumpTableSetLabel(unsigned uid, void X86ATTAsmPrinter::printPICJumpTableSetLabel(unsigned uid,
@ -825,7 +810,7 @@ bool X86ATTAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
switch (ExtraCode[0]) { switch (ExtraCode[0]) {
default: return true; // Unknown modifier. default: return true; // Unknown modifier.
case 'c': // Don't print "$" before a global var name or constant. case 'c': // Don't print "$" before a global var name or constant.
printOperand(MI, OpNo, "mem", /*NotRIPRel=*/true); printOperand(MI, OpNo, "mem");
return false; return false;
case 'b': // Print QImode register case 'b': // Print QImode register
case 'h': // Print QImode high register case 'h': // Print QImode high register
@ -838,7 +823,7 @@ bool X86ATTAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo,
return false; return false;
case 'P': // Don't print @PLT, but do print as memory. case 'P': // Don't print @PLT, but do print as memory.
printOperand(MI, OpNo, "mem", /*NotRIPRel=*/true); printOperand(MI, OpNo, "mem");
return false; return false;
case 'n': { // Negate the immediate or print a '-' before the operand. case 'n': { // Negate the immediate or print a '-' before the operand.
@ -875,7 +860,7 @@ bool X86ATTAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
// These only apply to registers, ignore on mem. // These only apply to registers, ignore on mem.
break; break;
case 'P': // Don't print @PLT, but do print as memory. case 'P': // Don't print @PLT, but do print as memory.
printMemReference(MI, OpNo, "mem", /*NotRIPRel=*/true); printMemReference(MI, OpNo, "no-rip");
return false; return false;
} }
} }

View File

@ -75,7 +75,7 @@ class VISIBILITY_HIDDEN X86ATTAsmPrinter : public AsmPrinter {
bool printInstruction(const MCInst *MI); bool printInstruction(const MCInst *MI);
void printOperand(const MCInst *MI, unsigned OpNo, void printOperand(const MCInst *MI, unsigned OpNo,
const char *Modifier = 0, bool NotRIPRel = false); const char *Modifier = 0);
void printMemReference(const MCInst *MI, unsigned Op); void printMemReference(const MCInst *MI, unsigned Op);
void printLeaMemReference(const MCInst *MI, unsigned Op); void printLeaMemReference(const MCInst *MI, unsigned Op);
void printSSECC(const MCInst *MI, unsigned Op); void printSSECC(const MCInst *MI, unsigned Op);
@ -123,7 +123,7 @@ class VISIBILITY_HIDDEN X86ATTAsmPrinter : public AsmPrinter {
// These methods are used by the tablegen'erated instruction printer. // These methods are used by the tablegen'erated instruction printer.
void printOperand(const MachineInstr *MI, unsigned OpNo, void printOperand(const MachineInstr *MI, unsigned OpNo,
const char *Modifier = 0, bool NotRIPRel = false); const char *Modifier = 0);
void print_pcrel_imm(const MachineInstr *MI, unsigned OpNo); void print_pcrel_imm(const MachineInstr *MI, unsigned OpNo);
void printi8mem(const MachineInstr *MI, unsigned OpNo) { void printi8mem(const MachineInstr *MI, unsigned OpNo) {
printMemReference(MI, OpNo); printMemReference(MI, OpNo);
@ -171,9 +171,9 @@ class VISIBILITY_HIDDEN X86ATTAsmPrinter : public AsmPrinter {
void printMachineInstruction(const MachineInstr *MI); void printMachineInstruction(const MachineInstr *MI);
void printSSECC(const MachineInstr *MI, unsigned Op); void printSSECC(const MachineInstr *MI, unsigned Op);
void printMemReference(const MachineInstr *MI, unsigned Op, void printMemReference(const MachineInstr *MI, unsigned Op,
const char *Modifier=NULL, bool NotRIPRel = false); const char *Modifier=NULL);
void printLeaMemReference(const MachineInstr *MI, unsigned Op, void printLeaMemReference(const MachineInstr *MI, unsigned Op,
const char *Modifier=NULL, bool NotRIPRel = false); const char *Modifier=NULL);
void printPICJumpTableSetLabel(unsigned uid, void printPICJumpTableSetLabel(unsigned uid,
const MachineBasicBlock *MBB) const; const MachineBasicBlock *MBB) const;
void printPICJumpTableSetLabel(unsigned uid, unsigned uid2, void printPICJumpTableSetLabel(unsigned uid, unsigned uid2,

View File

@ -65,7 +65,7 @@ void X86ATTAsmPrinter::print_pcrel_imm(const MCInst *MI, unsigned OpNo) {
void X86ATTAsmPrinter::printOperand(const MCInst *MI, unsigned OpNo, void X86ATTAsmPrinter::printOperand(const MCInst *MI, unsigned OpNo,
const char *Modifier, bool NotRIPRel) { const char *Modifier) {
assert(Modifier == 0 && "Modifiers should not be used"); assert(Modifier == 0 && "Modifiers should not be used");
const MCOperand &Op = MI->getOperand(OpNo); const MCOperand &Op = MI->getOperand(OpNo);
@ -93,13 +93,11 @@ void X86ATTAsmPrinter::printOperand(const MCInst *MI, unsigned OpNo,
} }
void X86ATTAsmPrinter::printLeaMemReference(const MCInst *MI, unsigned Op) { void X86ATTAsmPrinter::printLeaMemReference(const MCInst *MI, unsigned Op) {
bool NotRIPRel = false;
const MCOperand &BaseReg = MI->getOperand(Op); const MCOperand &BaseReg = MI->getOperand(Op);
const MCOperand &IndexReg = MI->getOperand(Op+2); const MCOperand &IndexReg = MI->getOperand(Op+2);
const MCOperand &DispSpec = MI->getOperand(Op+3); const MCOperand &DispSpec = MI->getOperand(Op+3);
NotRIPRel |= IndexReg.getReg() || BaseReg.getReg();
if (DispSpec.isImm()) { if (DispSpec.isImm()) {
int64_t DispVal = DispSpec.getImm(); int64_t DispVal = DispSpec.getImm();
if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg())) if (DispVal || (!IndexReg.getReg() && !BaseReg.getReg()))
@ -108,7 +106,7 @@ void X86ATTAsmPrinter::printLeaMemReference(const MCInst *MI, unsigned Op) {
abort(); abort();
//assert(DispSpec.isGlobal() || DispSpec.isCPI() || //assert(DispSpec.isGlobal() || DispSpec.isCPI() ||
// DispSpec.isJTI() || DispSpec.isSymbol()); // DispSpec.isJTI() || DispSpec.isSymbol());
//printOperand(MI, Op+3, "mem", NotRIPRel); //printOperand(MI, Op+3, "mem");
} }
if (IndexReg.getReg() || BaseReg.getReg()) { if (IndexReg.getReg() || BaseReg.getReg()) {

View File

@ -443,6 +443,7 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
// Set up the basic address. // Set up the basic address.
AM.GV = GV; AM.GV = GV;
if (!isCall && if (!isCall &&
TM.getRelocationModel() == Reloc::PIC_ && TM.getRelocationModel() == Reloc::PIC_ &&
!Subtarget->is64Bit()) !Subtarget->is64Bit())
@ -481,7 +482,11 @@ bool X86FastISel::X86SelectAddress(Value *V, X86AddressMode &AM, bool isCall) {
// Prevent loading GV stub multiple times in same MBB. // Prevent loading GV stub multiple times in same MBB.
LocalValueMap[V] = AM.Base.Reg; LocalValueMap[V] = AM.Base.Reg;
} else if (getTargetMachine()->symbolicAddressesAreRIPRel()) {
// Use rip-relative addressing if we can.
AM.Base.Reg = X86::RIP;
} }
return true; return true;
} }

View File

@ -65,7 +65,6 @@ namespace {
int FrameIndex; int FrameIndex;
} Base; } Base;
bool isRIPRel; // RIP as base?
unsigned Scale; unsigned Scale;
SDValue IndexReg; SDValue IndexReg;
int32_t Disp; int32_t Disp;
@ -78,7 +77,7 @@ namespace {
unsigned char SymbolFlags; // X86II::MO_* unsigned char SymbolFlags; // X86II::MO_*
X86ISelAddressMode() X86ISelAddressMode()
: BaseType(RegBase), isRIPRel(false), Scale(1), IndexReg(), Disp(0), : BaseType(RegBase), Scale(1), IndexReg(), Disp(0),
Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0), SymbolFlags(0) { Segment(), GV(0), CP(0), ES(0), JT(-1), Align(0), SymbolFlags(0) {
} }
@ -86,13 +85,32 @@ namespace {
return GV != 0 || CP != 0 || ES != 0 || JT != -1; return GV != 0 || CP != 0 || ES != 0 || JT != -1;
} }
bool hasBaseOrIndexReg() const {
return IndexReg.getNode() != 0 || Base.Reg.getNode() != 0;
}
/// isRIPRelative - Return true if this addressing mode is already RIP
/// relative.
bool isRIPRelative() const {
if (BaseType != RegBase) return false;
if (RegisterSDNode *RegNode =
dyn_cast_or_null<RegisterSDNode>(Base.Reg.getNode()))
return RegNode->getReg() == X86::RIP;
return false;
}
void setBaseReg(SDValue Reg) {
BaseType = RegBase;
Base.Reg = Reg;
}
void dump() { void dump() {
cerr << "X86ISelAddressMode " << this << "\n"; cerr << "X86ISelAddressMode " << this << "\n";
cerr << "Base.Reg "; cerr << "Base.Reg ";
if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump(); if (Base.Reg.getNode() != 0) Base.Reg.getNode()->dump();
else cerr << "nul"; else cerr << "nul";
cerr << " Base.FrameIndex " << Base.FrameIndex << "\n"; cerr << " Base.FrameIndex " << Base.FrameIndex << "\n";
cerr << "isRIPRel " << isRIPRel << " Scale" << Scale << "\n"; cerr << " Scale" << Scale << "\n";
cerr << "IndexReg "; cerr << "IndexReg ";
if (IndexReg.getNode() != 0) IndexReg.getNode()->dump(); if (IndexReg.getNode() != 0) IndexReg.getNode()->dump();
else cerr << "nul"; else cerr << "nul";
@ -685,65 +703,80 @@ bool X86DAGToDAGISel::MatchLoad(SDValue N, X86ISelAddressMode &AM) {
return true; return true;
} }
/// MatchWrapper - Try to match X86ISD::Wrapper and X86ISD::WrapperRIP nodes
/// into an addressing mode. These wrap things that will resolve down into a
/// symbol reference. If no match is possible, this returns true, otherwise it
/// returns false.
bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) { bool X86DAGToDAGISel::MatchWrapper(SDValue N, X86ISelAddressMode &AM) {
bool SymbolicAddressesAreRIPRel = // If the addressing mode already has a symbol as the displacement, we can
getTargetMachine().symbolicAddressesAreRIPRel(); // never match another symbol.
bool is64Bit = Subtarget->is64Bit();
DOUT << "Wrapper: 64bit " << is64Bit;
DOUT << " AM "; DEBUG(AM.dump()); DOUT << "\n";
// Under X86-64 non-small code model, GV (and friends) are 64-bits.
if (is64Bit && (TM.getCodeModel() != CodeModel::Small))
return true;
// Base and index reg must be 0 in order to use rip as base.
bool canUsePICRel = !AM.Base.Reg.getNode() && !AM.IndexReg.getNode();
if (is64Bit && !canUsePICRel && SymbolicAddressesAreRIPRel)
return true;
if (AM.hasSymbolicDisplacement()) if (AM.hasSymbolicDisplacement())
return true; return true;
// If value is available in a register both base and index components have
// been picked, we can't fit the result available in the register in the
// addressing mode. Duplicate GlobalAddress or ConstantPool as displacement.
SDValue N0 = N.getOperand(0); SDValue N0 = N.getOperand(0);
// Handle X86-64 rip-relative addresses. We check this before checking direct
// folding because RIP is preferable to non-RIP accesses.
if (Subtarget->is64Bit() &&
// Under X86-64 non-small code model, GV (and friends) are 64-bits, so
// they cannot be folded into immediate fields.
// FIXME: This can be improved for kernel and other models?
TM.getCodeModel() == CodeModel::Small &&
// Base and index reg must be 0 in order to use %rip as base and lowering
// must allow RIP.
!AM.hasBaseOrIndexReg() && N.getOpcode() == X86ISD::WrapperRIP) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) { if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
uint64_t Offset = G->getOffset(); int64_t Offset = AM.Disp + G->getOffset();
if (!is64Bit || isInt32(AM.Disp + Offset)) { if (!isInt32(Offset)) return true;
GlobalValue *GV = G->getGlobal(); AM.GV = G->getGlobal();
bool isRIPRel = SymbolicAddressesAreRIPRel; AM.Disp = Offset;
if (N0.getOpcode() == llvm::ISD::TargetGlobalTLSAddress) {
TLSModel::Model model =
getTLSModel (GV, TM.getRelocationModel());
if (is64Bit && model == TLSModel::InitialExec)
isRIPRel = true;
}
AM.GV = GV;
AM.Disp += Offset;
AM.isRIPRel = isRIPRel;
AM.SymbolFlags = G->getTargetFlags(); AM.SymbolFlags = G->getTargetFlags();
return false;
}
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) { } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
uint64_t Offset = CP->getOffset(); int64_t Offset = AM.Disp + CP->getOffset();
if (!is64Bit || isInt32(AM.Disp + Offset)) { if (!isInt32(Offset)) return true;
AM.CP = CP->getConstVal(); AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment(); AM.Align = CP->getAlignment();
AM.Disp += Offset; AM.Disp = Offset;
AM.isRIPRel = SymbolicAddressesAreRIPRel;
AM.SymbolFlags = CP->getTargetFlags(); AM.SymbolFlags = CP->getTargetFlags();
return false;
}
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) { } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
AM.ES = S->getSymbol(); AM.ES = S->getSymbol();
AM.isRIPRel = SymbolicAddressesAreRIPRel;
AM.SymbolFlags = S->getTargetFlags(); AM.SymbolFlags = S->getTargetFlags();
return false; } else {
} else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) { JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
AM.JT = J->getIndex(); AM.JT = J->getIndex();
AM.isRIPRel = SymbolicAddressesAreRIPRel;
AM.SymbolFlags = J->getTargetFlags(); AM.SymbolFlags = J->getTargetFlags();
}
if (N.getOpcode() == X86ISD::WrapperRIP)
AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
return false;
}
// Handle the case when globals fit in our immediate field: This is true for
// X86-32 always and X86-64 when in -static -mcmodel=small mode. In 64-bit
// mode, this results in a non-RIP-relative computation.
if (!Subtarget->is64Bit() ||
(TM.getCodeModel() == CodeModel::Small &&
TM.getRelocationModel() == Reloc::Static)) {
if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
AM.GV = G->getGlobal();
AM.Disp += G->getOffset();
AM.SymbolFlags = G->getTargetFlags();
} else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
AM.CP = CP->getConstVal();
AM.Align = CP->getAlignment();
AM.Disp += CP->getOffset();
AM.SymbolFlags = CP->getTargetFlags();
} else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
AM.ES = S->getSymbol();
AM.SymbolFlags = S->getTargetFlags();
} else {
JumpTableSDNode *J = cast<JumpTableSDNode>(N0);
AM.JT = J->getIndex();
AM.SymbolFlags = J->getTargetFlags();
}
return false; return false;
} }
@ -762,12 +795,19 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
if (Depth > 5) if (Depth > 5)
return MatchAddressBase(N, AM); return MatchAddressBase(N, AM);
// If this is already a %rip relative address, we can only merge immediates
// into it. Instead of handling this in every case, we handle it here.
// RIP relative addressing: %rip + 32-bit displacement! // RIP relative addressing: %rip + 32-bit displacement!
if (AM.isRIPRel) { if (AM.isRIPRelative()) {
if (!AM.ES && AM.JT != -1 && N.getOpcode() == ISD::Constant) { // FIXME: JumpTable and ExternalSymbol address currently don't like
uint64_t Val = cast<ConstantSDNode>(N)->getSExtValue(); // displacements. It isn't very important, but this should be fixed for
if (!is64Bit || isInt32(AM.Disp + Val)) { // consistency.
AM.Disp += Val; if (!AM.ES && AM.JT != -1) return true;
if (ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(N)) {
int64_t Val = AM.Disp + Cst->getSExtValue();
if (isInt32(Val)) {
AM.Disp = Val;
return false; return false;
} }
} }
@ -791,6 +831,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
break; break;
case X86ISD::Wrapper: case X86ISD::Wrapper:
case X86ISD::WrapperRIP:
if (!MatchWrapper(N, AM)) if (!MatchWrapper(N, AM))
return false; return false;
break; break;
@ -810,7 +851,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
break; break;
case ISD::SHL: case ISD::SHL:
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1 || AM.isRIPRel) if (AM.IndexReg.getNode() != 0 || AM.Scale != 1)
break; break;
if (ConstantSDNode if (ConstantSDNode
@ -851,8 +892,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
// X*[3,5,9] -> X+X*[2,4,8] // X*[3,5,9] -> X+X*[2,4,8]
if (AM.BaseType == X86ISelAddressMode::RegBase && if (AM.BaseType == X86ISelAddressMode::RegBase &&
AM.Base.Reg.getNode() == 0 && AM.Base.Reg.getNode() == 0 &&
AM.IndexReg.getNode() == 0 && AM.IndexReg.getNode() == 0) {
!AM.isRIPRel) {
if (ConstantSDNode if (ConstantSDNode
*CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1))) *CN = dyn_cast<ConstantSDNode>(N.getNode()->getOperand(1)))
if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 || if (CN->getZExtValue() == 3 || CN->getZExtValue() == 5 ||
@ -901,7 +941,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
break; break;
} }
// Test if the index field is free for use. // Test if the index field is free for use.
if (AM.IndexReg.getNode() || AM.isRIPRel) { if (AM.IndexReg.getNode() || AM.isRIPRelative()) {
AM = Backup; AM = Backup;
break; break;
} }
@ -972,8 +1012,7 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
// the add. // the add.
if (AM.BaseType == X86ISelAddressMode::RegBase && if (AM.BaseType == X86ISelAddressMode::RegBase &&
!AM.Base.Reg.getNode() && !AM.Base.Reg.getNode() &&
!AM.IndexReg.getNode() && !AM.IndexReg.getNode()) {
!AM.isRIPRel) {
AM.Base.Reg = N.getNode()->getOperand(0); AM.Base.Reg = N.getNode()->getOperand(0);
AM.IndexReg = N.getNode()->getOperand(1); AM.IndexReg = N.getNode()->getOperand(1);
AM.Scale = 1; AM.Scale = 1;
@ -1012,9 +1051,6 @@ bool X86DAGToDAGISel::MatchAddress(SDValue N, X86ISelAddressMode &AM,
// Scale must not be used already. // Scale must not be used already.
if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break; if (AM.IndexReg.getNode() != 0 || AM.Scale != 1) break;
// Not when RIP is used as the base.
if (AM.isRIPRel) break;
SDValue X = Shift.getOperand(0); SDValue X = Shift.getOperand(0);
ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1)); ConstantSDNode *C2 = dyn_cast<ConstantSDNode>(N.getOperand(1));
ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); ConstantSDNode *C1 = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
@ -1136,7 +1172,7 @@ bool X86DAGToDAGISel::MatchAddressBase(SDValue N, X86ISelAddressMode &AM) {
// Is the base register already occupied? // Is the base register already occupied?
if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) { if (AM.BaseType != X86ISelAddressMode::RegBase || AM.Base.Reg.getNode()) {
// If so, check to see if the scale index register is set. // If so, check to see if the scale index register is set.
if (AM.IndexReg.getNode() == 0 && !AM.isRIPRel) { if (AM.IndexReg.getNode() == 0) {
AM.IndexReg = N; AM.IndexReg = N;
AM.Scale = 1; AM.Scale = 1;
return false; return false;
@ -1163,7 +1199,7 @@ bool X86DAGToDAGISel::SelectAddr(SDValue Op, SDValue N, SDValue &Base,
if (AvoidDupAddrCompute && !N.hasOneUse()) { if (AvoidDupAddrCompute && !N.hasOneUse()) {
unsigned Opcode = N.getOpcode(); unsigned Opcode = N.getOpcode();
if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex && if (Opcode != ISD::Constant && Opcode != ISD::FrameIndex &&
Opcode != X86ISD::Wrapper) { Opcode != X86ISD::Wrapper && Opcode != X86ISD::WrapperRIP) {
// If we are able to fold N into addressing mode, then we'll allow it even // If we are able to fold N into addressing mode, then we'll allow it even
// if N has multiple uses. In general, addressing computation is used as // if N has multiple uses. In general, addressing computation is used as
// addresses by all of its uses. But watch out for CopyToReg uses, that // addresses by all of its uses. But watch out for CopyToReg uses, that
@ -1694,7 +1730,8 @@ SDNode *X86DAGToDAGISel::Select(SDValue N) {
// If N2 is not Wrapper(decriptor) then the llvm.declare is mangled // If N2 is not Wrapper(decriptor) then the llvm.declare is mangled
// somehow, just ignore it. // somehow, just ignore it.
if (N2.getOpcode() != X86ISD::Wrapper) { if (N2.getOpcode() != X86ISD::Wrapper &&
N2.getOpcode() != X86ISD::WrapperRIP) {
ReplaceUses(N.getValue(0), Chain); ReplaceUses(N.getValue(0), Chain);
return NULL; return NULL;
} }

View File

@ -4316,18 +4316,22 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the // In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg. // global base reg.
unsigned char OpFlag = 0; unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) { if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
if (Subtarget->isPICStyleStub()) if (Subtarget->isPICStyleStub())
OpFlag = X86II::MO_PIC_BASE_OFFSET; OpFlag = X86II::MO_PIC_BASE_OFFSET;
else if (Subtarget->isPICStyleGOT()) else if (Subtarget->isPICStyleGOT())
OpFlag = X86II::MO_GOTOFF; OpFlag = X86II::MO_GOTOFF;
else if (Subtarget->isPICStyleRIPRel() &&
getTargetMachine().getCodeModel() == CodeModel::Small)
WrapperKind = X86ISD::WrapperRIP;
} }
SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(), SDValue Result = DAG.getTargetConstantPool(CP->getConstVal(), getPointerTy(),
CP->getAlignment(), CP->getAlignment(),
CP->getOffset(), OpFlag); CP->getOffset(), OpFlag);
DebugLoc DL = CP->getDebugLoc(); DebugLoc DL = CP->getDebugLoc();
Result = DAG.getNode(X86ISD::Wrapper, DL, getPointerTy(), Result); Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset. // With PIC, the address is actually $g + Offset.
if (OpFlag) { if (OpFlag) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(), Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
@ -4339,6 +4343,74 @@ X86TargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) {
return Result; return Result;
} }
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
if (Subtarget->isPICStyleStub())
OpFlag = X86II::MO_PIC_BASE_OFFSET;
else if (Subtarget->isPICStyleGOT())
OpFlag = X86II::MO_GOTOFF;
else if (Subtarget->isPICStyleRIPRel())
WrapperKind = X86ISD::WrapperRIP;
}
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
OpFlag);
DebugLoc DL = JT->getDebugLoc();
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc::getUnknownLoc(), getPointerTy()),
Result);
}
return Result;
}
SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = 0;
unsigned WrapperKind = X86ISD::Wrapper;
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
if (Subtarget->isPICStyleStub())
OpFlag = X86II::MO_PIC_BASE_OFFSET;
else if (Subtarget->isPICStyleGOT())
OpFlag = X86II::MO_GOTOFF;
else if (Subtarget->isPICStyleRIPRel())
WrapperKind = X86ISD::WrapperRIP;
}
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy(), OpFlag);
DebugLoc DL = Op.getDebugLoc();
Result = DAG.getNode(WrapperKind, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
!Subtarget->isPICStyleRIPRel()) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc::getUnknownLoc(),
getPointerTy()),
Result);
}
return Result;
}
SDValue SDValue
X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl, X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
int64_t Offset, int64_t Offset,
@ -4353,8 +4425,14 @@ X86TargetLowering::LowerGlobalAddress(const GlobalValue *GV, DebugLoc dl,
if (!IsPic && !ExtraLoadRequired && isInt32(Offset)) { if (!IsPic && !ExtraLoadRequired && isInt32(Offset)) {
Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset); Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), Offset);
Offset = 0; Offset = 0;
} else } else {
Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0); Result = DAG.getTargetGlobalAddress(GV, getPointerTy(), 0);
}
if (Subtarget->isPICStyleRIPRel() &&
getTargetMachine().getCodeModel() == CodeModel::Small)
Result = DAG.getNode(X86ISD::WrapperRIP, dl, getPointerTy(), Result);
else
Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result); Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset. // With PIC, the address is actually $g + Offset.
@ -4449,19 +4527,25 @@ static SDValue LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
NULL, 0); NULL, 0);
unsigned char OperandFlags = 0; unsigned char OperandFlags = 0;
if (model == TLSModel::InitialExec) { // Most TLS accesses are not RIP relative, even on x86-64. One exception is
OperandFlags = is64Bit ? X86II::MO_GOTTPOFF : X86II::MO_INDNTPOFF; // initialexec.
} else { unsigned WrapperKind = X86ISD::Wrapper;
assert(model == TLSModel::LocalExec); if (model == TLSModel::LocalExec) {
OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF; OperandFlags = is64Bit ? X86II::MO_TPOFF : X86II::MO_NTPOFF;
} else if (is64Bit) {
assert(model == TLSModel::InitialExec);
OperandFlags = X86II::MO_GOTTPOFF;
WrapperKind = X86ISD::WrapperRIP;
} else {
assert(model == TLSModel::InitialExec);
OperandFlags = X86II::MO_INDNTPOFF;
} }
// emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial // emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
// exec) // exec)
SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0), SDValue TGA = DAG.getTargetGlobalAddress(GA->getGlobal(), GA->getValueType(0),
GA->getOffset(), OperandFlags); GA->getOffset(), OperandFlags);
SDValue Offset = DAG.getNode(X86ISD::Wrapper, dl, PtrVT, TGA); SDValue Offset = DAG.getNode(WrapperKind, dl, PtrVT, TGA);
if (model == TLSModel::InitialExec) if (model == TLSModel::InitialExec)
Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset, Offset = DAG.getLoad(PtrVT, dl, DAG.getEntryNode(), Offset,
@ -4506,54 +4590,6 @@ X86TargetLowering::LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) {
return SDValue(); return SDValue();
} }
SDValue
X86TargetLowering::LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) {
// FIXME there isn't really any debug info here
DebugLoc dl = Op.getDebugLoc();
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
SDValue Result = DAG.getTargetExternalSymbol(Sym, getPointerTy());
Result = DAG.getNode(X86ISD::Wrapper, dl, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
!Subtarget->isPICStyleRIPRel()) {
Result = DAG.getNode(ISD::ADD, dl, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc::getUnknownLoc(),
getPointerTy()),
Result);
}
return Result;
}
SDValue X86TargetLowering::LowerJumpTable(SDValue Op, SelectionDAG &DAG) {
JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);
// In PIC mode (unless we're in RIPRel PIC mode) we add an offset to the
// global base reg.
unsigned char OpFlag = 0;
if (getTargetMachine().getRelocationModel() == Reloc::PIC_) {
if (Subtarget->isPICStyleStub())
OpFlag = X86II::MO_PIC_BASE_OFFSET;
else if (Subtarget->isPICStyleGOT())
OpFlag = X86II::MO_GOTOFF;
}
SDValue Result = DAG.getTargetJumpTable(JT->getIndex(), getPointerTy(),
OpFlag);
DebugLoc DL = JT->getDebugLoc();
Result = DAG.getNode(X86ISD::Wrapper, DL, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (OpFlag) {
Result = DAG.getNode(ISD::ADD, DL, getPointerTy(),
DAG.getNode(X86ISD::GlobalBaseReg,
DebugLoc::getUnknownLoc(), getPointerTy()),
Result);
}
return Result;
}
/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and /// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount. /// take a 2 x i32 value to shift plus a shift amount.
@ -6810,6 +6846,7 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS"; case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg"; case X86ISD::GlobalBaseReg: return "X86ISD::GlobalBaseReg";
case X86ISD::Wrapper: return "X86ISD::Wrapper"; case X86ISD::Wrapper: return "X86ISD::Wrapper";
case X86ISD::WrapperRIP: return "X86ISD::WrapperRIP";
case X86ISD::PEXTRB: return "X86ISD::PEXTRB"; case X86ISD::PEXTRB: return "X86ISD::PEXTRB";
case X86ISD::PEXTRW: return "X86ISD::PEXTRW"; case X86ISD::PEXTRW: return "X86ISD::PEXTRW";
case X86ISD::INSERTPS: return "X86ISD::INSERTPS"; case X86ISD::INSERTPS: return "X86ISD::INSERTPS";

View File

@ -45,7 +45,8 @@ def lea64_32mem : Operand<i32> {
// Complex Pattern Definitions. // Complex Pattern Definitions.
// //
def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr", def lea64addr : ComplexPattern<i64, 4, "SelectLEAAddr",
[add, mul, X86mul_imm, shl, or, frameindex, X86Wrapper], [add, mul, X86mul_imm, shl, or, frameindex, X86Wrapper,
X86WrapperRIP],
[]>; []>;
def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr", def tls64addr : ComplexPattern<i64, 4, "SelectTLSADDRAddr",
@ -1418,6 +1419,9 @@ def : Pat<(i64 (X86Wrapper tglobaladdr :$dst)),
def : Pat<(i64 (X86Wrapper texternalsym:$dst)), def : Pat<(i64 (X86Wrapper texternalsym:$dst)),
(MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>; (MOV64ri texternalsym:$dst)>, Requires<[NotSmallCode]>;
// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack, the 'imm' predicate
// should handle this sort of thing.
def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst), def : Pat<(store (i64 (X86Wrapper tconstpool:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tconstpool:$src)>, (MOV64mi32 addr:$dst, tconstpool:$src)>,
Requires<[SmallCode, IsStatic]>; Requires<[SmallCode, IsStatic]>;
@ -1431,6 +1435,23 @@ def : Pat<(store (i64 (X86Wrapper texternalsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, texternalsym:$src)>, (MOV64mi32 addr:$dst, texternalsym:$src)>,
Requires<[SmallCode, IsStatic]>; Requires<[SmallCode, IsStatic]>;
// If we have small model and -static mode, it is safe to store global addresses
// directly as immediates. FIXME: This is really a hack, the 'imm' predicate
// should handle this sort of thing.
def : Pat<(store (i64 (X86WrapperRIP tconstpool:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tconstpool:$src)>,
Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86WrapperRIP tjumptable:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tjumptable:$src)>,
Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86WrapperRIP tglobaladdr:$src)), addr:$dst),
(MOV64mi32 addr:$dst, tglobaladdr:$src)>,
Requires<[SmallCode, IsStatic]>;
def : Pat<(store (i64 (X86WrapperRIP texternalsym:$src)), addr:$dst),
(MOV64mi32 addr:$dst, texternalsym:$src)>,
Requires<[SmallCode, IsStatic]>;
// Calls // Calls
// Direct PC relative function call for small code model. 32-bit displacement // Direct PC relative function call for small code model. 32-bit displacement
// sign extended to 64-bit. // sign extended to 64-bit.

View File

@ -806,7 +806,7 @@ X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
(MI->getOperand(4).isGlobal() && (MI->getOperand(4).isGlobal() &&
isGVStub(MI->getOperand(4).getGlobal(), TM)))) { isGVStub(MI->getOperand(4).getGlobal(), TM)))) {
unsigned BaseReg = MI->getOperand(1).getReg(); unsigned BaseReg = MI->getOperand(1).getReg();
if (BaseReg == 0) if (BaseReg == 0 || BaseReg == X86::RIP)
return true; return true;
// Allow re-materialization of PIC load. // Allow re-materialization of PIC load.
if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal()) if (!ReMatPICStubLoad && MI->getOperand(4).isGlobal())

View File

@ -220,7 +220,8 @@ bool X86TargetMachine::addAssemblyEmitter(PassManagerBase &PM,
// On Darwin, override 64-bit static relocation to pic_ since the // On Darwin, override 64-bit static relocation to pic_ since the
// assembler doesn't support it. // assembler doesn't support it.
if (DefRelocModel == Reloc::Static && if (DefRelocModel == Reloc::Static &&
Subtarget.isTargetDarwin() && Subtarget.is64Bit()) Subtarget.isTargetDarwin() && Subtarget.is64Bit() &&
getCodeModel() == CodeModel::Small)
setRelocationModel(Reloc::PIC_); setRelocationModel(Reloc::PIC_);
assert(AsmPrinterCtor && "AsmPrinter was not linked in"); assert(AsmPrinterCtor && "AsmPrinter was not linked in");

View File

@ -1,4 +1,5 @@
; RUN: llvm-as < %s | llc -mtriple=x86_64-linux -relocation-model=static -aggressive-remat | grep xmm | count 2 ; RUN: llvm-as < %s | llc -mtriple=x86_64-linux -relocation-model=static -aggressive-remat | grep xmm | count 2
; XFAIL: *
declare void @bar() nounwind declare void @bar() nounwind

View File

@ -7,7 +7,7 @@
@i = external thread_local global i32 ; <i32*> [#uses=2] @i = external thread_local global i32 ; <i32*> [#uses=2]
define i32 @f() { define i32 @f() nounwind {
entry: entry:
%tmp1 = load i32* @i ; <i32> [#uses=1] %tmp1 = load i32* @i ; <i32> [#uses=1]
ret i32 %tmp1 ret i32 %tmp1