[X86] Simplify some X86 address mode folding code, NFCI

This code should really do exactly the same thing for 32-bit x86 and the
64-bit small code model, with the exception that RIP-relative addressing
can't use base and index registers.

llvm-svn: 332893
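The exception called out in the message is an x86-64 encoding fact: a RIP-relative operand is encoded as disp32(%rip) only, so it cannot also carry a base or index register (movl sym(%rip), %eax is encodable; sym(%rip,%rcx,4) is not), whereas 32-bit x86 and non-RIP 64-bit modes allow base + scale*index + disp. Below is a minimal standalone sketch of that invariant; it is illustrative only, not LLVM code, and all names in it are made up:

    #include <cassert>
    #include <cstdint>

    // Distilled shape of an x86 addressing mode:
    // base + scale*index + disp, optionally RIP-relative.
    struct AddrMode {
      bool HasBaseReg = false;
      bool HasIndexReg = false;
      int64_t Disp = 0;
      bool IsRIPRel = false;
    };

    // A RIP-relative mode may carry only a 32-bit displacement; base and
    // index registers are mutually exclusive with %rip.
    bool isEncodable(const AddrMode &AM) {
      if (AM.IsRIPRel && (AM.HasBaseReg || AM.HasIndexReg))
        return false;
      return AM.Disp >= INT32_MIN && AM.Disp <= INT32_MAX;
    }

    int main() {
      AddrMode RipPlusDisp{false, false, 16, true};
      assert(isEncodable(RipPlusDisp));   // movl sym+16(%rip), %eax
      RipPlusDisp.HasIndexReg = true;
      assert(!isEncodable(RipPlusDisp));  // no sym(%rip,%rcx,4) encoding
    }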
parent dc3bf90447
commit 537917d13c
@@ -877,9 +877,14 @@ static bool isDispSafeForFrameIndex(int64_t Val) {
 
 bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
                                             X86ISelAddressMode &AM) {
+  // If there's no offset to fold, we don't need to do any work.
+  if (Offset == 0)
+    return false;
+
   // Cannot combine ExternalSymbol displacements with integer offsets.
-  if (Offset != 0 && (AM.ES || AM.MCSym))
+  if (AM.ES || AM.MCSym)
     return true;
+
   int64_t Val = AM.Disp + Offset;
   CodeModel::Model M = TM.getCodeModel();
   if (Subtarget->is64Bit()) {
@@ -933,94 +938,58 @@ bool X86DAGToDAGISel::matchWrapper(SDValue N, X86ISelAddressMode &AM) {
   if (AM.hasSymbolicDisplacement())
     return true;
 
-  SDValue N0 = N.getOperand(0);
+  bool IsRIPRel = N.getOpcode() == X86ISD::WrapperRIP;
+
+  // Only do this address mode folding for 64-bit if we're in the small code
+  // model.
+  // FIXME: But we can do GOTPCREL addressing in the medium code model.
   CodeModel::Model M = TM.getCodeModel();
+  if (Subtarget->is64Bit() && M != CodeModel::Small && M != CodeModel::Kernel)
+    return true;
 
-  // Handle X86-64 rip-relative addresses.  We check this before checking direct
-  // folding because RIP is preferable to non-RIP accesses.
-  if (Subtarget->is64Bit() && N.getOpcode() == X86ISD::WrapperRIP &&
-      // Under X86-64 non-small code model, GV (and friends) are 64-bits, so
-      // they cannot be folded into immediate fields.
-      // FIXME: This can be improved for kernel and other models?
-      (M == CodeModel::Small || M == CodeModel::Kernel)) {
-    // Base and index reg must be 0 in order to use %rip as base.
-    if (AM.hasBaseOrIndexReg())
-      return true;
-    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
-      X86ISelAddressMode Backup = AM;
-      AM.GV = G->getGlobal();
-      AM.SymbolFlags = G->getTargetFlags();
-      if (foldOffsetIntoAddress(G->getOffset(), AM)) {
-        AM = Backup;
-        return true;
-      }
-    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
-      X86ISelAddressMode Backup = AM;
-      AM.CP = CP->getConstVal();
-      AM.Align = CP->getAlignment();
-      AM.SymbolFlags = CP->getTargetFlags();
-      if (foldOffsetIntoAddress(CP->getOffset(), AM)) {
-        AM = Backup;
-        return true;
-      }
-    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
-      AM.ES = S->getSymbol();
-      AM.SymbolFlags = S->getTargetFlags();
-    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
-      AM.MCSym = S->getMCSymbol();
-    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
-      AM.JT = J->getIndex();
-      AM.SymbolFlags = J->getTargetFlags();
-    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
-      X86ISelAddressMode Backup = AM;
-      AM.BlockAddr = BA->getBlockAddress();
-      AM.SymbolFlags = BA->getTargetFlags();
-      if (foldOffsetIntoAddress(BA->getOffset(), AM)) {
-        AM = Backup;
-        return true;
-      }
-    } else
-      llvm_unreachable("Unhandled symbol reference node.");
+  // Base and index reg must be 0 in order to use %rip as base.
+  if (IsRIPRel && AM.hasBaseOrIndexReg())
+    return true;
 
-    if (N.getOpcode() == X86ISD::WrapperRIP)
-      AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
-    return false;
-  }
+  // Make a local copy in case we can't do this fold.
+  X86ISelAddressMode Backup = AM;
 
-  // Handle the case when globals fit in our immediate field: This is true for
-  // X86-32 always and X86-64 when in -mcmodel=small mode.  In 64-bit
-  // mode, this only applies to a non-RIP-relative computation.
-  if (!Subtarget->is64Bit() ||
-      M == CodeModel::Small || M == CodeModel::Kernel) {
-    assert(N.getOpcode() != X86ISD::WrapperRIP &&
-           "RIP-relative addressing already handled");
-    if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
-      AM.GV = G->getGlobal();
-      AM.Disp += G->getOffset();
-      AM.SymbolFlags = G->getTargetFlags();
-    } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
-      AM.CP = CP->getConstVal();
-      AM.Align = CP->getAlignment();
-      AM.Disp += CP->getOffset();
-      AM.SymbolFlags = CP->getTargetFlags();
-    } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
-      AM.ES = S->getSymbol();
-      AM.SymbolFlags = S->getTargetFlags();
-    } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
-      AM.MCSym = S->getMCSymbol();
-    } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
-      AM.JT = J->getIndex();
-      AM.SymbolFlags = J->getTargetFlags();
-    } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
-      AM.BlockAddr = BA->getBlockAddress();
-      AM.Disp += BA->getOffset();
-      AM.SymbolFlags = BA->getTargetFlags();
-    } else
-      llvm_unreachable("Unhandled symbol reference node.");
-    return false;
-  }
+  int64_t Offset = 0;
+  SDValue N0 = N.getOperand(0);
+  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(N0)) {
+    AM.GV = G->getGlobal();
+    AM.SymbolFlags = G->getTargetFlags();
+    Offset = G->getOffset();
+  } else if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N0)) {
+    AM.CP = CP->getConstVal();
+    AM.Align = CP->getAlignment();
+    AM.SymbolFlags = CP->getTargetFlags();
+    Offset = CP->getOffset();
+  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(N0)) {
+    AM.ES = S->getSymbol();
+    AM.SymbolFlags = S->getTargetFlags();
+  } else if (auto *S = dyn_cast<MCSymbolSDNode>(N0)) {
+    AM.MCSym = S->getMCSymbol();
+  } else if (JumpTableSDNode *J = dyn_cast<JumpTableSDNode>(N0)) {
+    AM.JT = J->getIndex();
+    AM.SymbolFlags = J->getTargetFlags();
+  } else if (BlockAddressSDNode *BA = dyn_cast<BlockAddressSDNode>(N0)) {
+    AM.BlockAddr = BA->getBlockAddress();
+    AM.SymbolFlags = BA->getTargetFlags();
+    Offset = BA->getOffset();
+  } else
+    llvm_unreachable("Unhandled symbol reference node.");
 
-  return true;
+  if (foldOffsetIntoAddress(Offset, AM)) {
+    AM = Backup;
+    return true;
+  }
+
+  if (IsRIPRel)
+    AM.setBaseReg(CurDAG->getRegister(X86::RIP, MVT::i64));
+
+  // Commit the changes now that we know this fold is safe.
+  return false;
 }
 
 /// Add the specified node to the specified addressing mode, returning true if
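As a footnote to the diff: the rewritten matchWrapper runs every symbol kind through a single backup/mutate/try-fold/restore sequence instead of duplicating it per code path. Here is a rough, self-contained sketch of that pattern under the same return convention as the LLVM code (true means the match failed); the types and names are stand-ins, not the real API:

    #include <cassert>
    #include <cstdint>

    // Stand-in for X86ISelAddressMode; names here are hypothetical.
    struct AddrMode {
      const void *Symbol = nullptr; // plays the role of GV/CP/ES/JT/BlockAddr
      int64_t Disp = 0;
    };

    // Mirrors the convention that returning true means "cannot fold".
    bool foldOffset(int64_t Offset, AddrMode &AM) {
      int64_t Val = AM.Disp + Offset;
      if (Val < INT32_MIN || Val > INT32_MAX)
        return true; // combined displacement would not fit in a disp32 field
      AM.Disp = Val;
      return false;
    }

    // One backup/mutate/restore sequence shared by every symbol kind.
    bool matchSymbol(const void *Sym, int64_t Offset, AddrMode &AM) {
      AddrMode Backup = AM; // local copy in case the fold is unsafe
      AM.Symbol = Sym;      // speculatively record the symbol
      if (foldOffset(Offset, AM)) {
        AM = Backup;        // roll back on failure
        return true;
      }
      return false;         // commit: the fold is known safe
    }

    int main() {
      AddrMode AM;
      int Dummy = 0;
      assert(!matchSymbol(&Dummy, 16, AM) && AM.Disp == 16); // fold succeeds
      assert(matchSymbol(&Dummy, int64_t(1) << 40, AM) &&
             AM.Disp == 16); // too large: rolled back to the backup
    }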