forked from OSchip/llvm-project
[X86] Don't exit from foldOffsetIntoAddress if the Offset is 0, but AM.Disp is non-zero.
This is an alternate fix for the issue D73606 was trying to solve. The main issue here is that we bailed out of foldOffsetIntoAddress if Offset is 0. But if we just found a symbolic displacement and AM.Disp became non-zero earlier, we still need to validate that AM.Disp is compatible with the symbolic displacement. This passes fold-add-pcrel.ll. Differential Revision: https://reviews.llvm.org/D73608
This commit is contained in:
parent
3e24242a7d
commit
1ef8e8b414
|
@ -1409,15 +1409,17 @@ static bool isDispSafeForFrameIndex(int64_t Val) {
|
|||
|
||||
bool X86DAGToDAGISel::foldOffsetIntoAddress(uint64_t Offset,
|
||||
X86ISelAddressMode &AM) {
|
||||
// If there's no offset to fold, we don't need to do any work.
|
||||
if (Offset == 0)
|
||||
// If the final displacement is 0, we don't need to do any work. We may have
|
||||
// already matched a displacement and the caller just added the symbolic
|
||||
// displacement with an offset of 0. So recheck everything if Val is non-zero.
|
||||
int64_t Val = AM.Disp + Offset;
|
||||
if (Val == 0)
|
||||
return false;
|
||||
|
||||
// Cannot combine ExternalSymbol displacements with integer offsets.
|
||||
if (AM.ES || AM.MCSym)
|
||||
return true;
|
||||
|
||||
int64_t Val = AM.Disp + Offset;
|
||||
CodeModel::Model M = TM.getCodeModel();
|
||||
if (Subtarget->is64Bit()) {
|
||||
if (!X86::isOffsetSuitableForCodeModel(Val, M,
|
||||
|
@ -1581,24 +1583,13 @@ bool X86DAGToDAGISel::matchAdd(SDValue &N, X86ISelAddressMode &AM,
|
|||
if (!matchAddressRecursively(N.getOperand(0), AM, Depth+1) &&
|
||||
!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth+1))
|
||||
return false;
|
||||
|
||||
// Don't try commuting operands if the address is in the form of
|
||||
// sym+disp(%rip). foldOffsetIntoAddress() currently does not know there is a
|
||||
// symbolic displacement and would fold disp. If disp is just a bit smaller
|
||||
// than 2**31, it can easily cause a relocation overflow.
|
||||
bool NoCommutate = false;
|
||||
if (AM.isRIPRelative() && AM.hasSymbolicDisplacement())
|
||||
if (ConstantSDNode *Cst =
|
||||
dyn_cast<ConstantSDNode>(Handle.getValue().getOperand(1)))
|
||||
NoCommutate = Cst->getSExtValue() != 0;
|
||||
|
||||
AM = Backup;
|
||||
if (!NoCommutate) {
|
||||
// Try again after commutating the operands.
|
||||
if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM, Depth + 1) &&
|
||||
!matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
|
||||
return false;
|
||||
}
|
||||
|
||||
// Try again after commutating the operands.
|
||||
if (!matchAddressRecursively(Handle.getValue().getOperand(1), AM,
|
||||
Depth + 1) &&
|
||||
!matchAddressRecursively(Handle.getValue().getOperand(0), AM, Depth + 1))
|
||||
return false;
|
||||
AM = Backup;
|
||||
|
||||
// If we couldn't fold both operands into the address at the same time,
|
||||
|
|
Loading…
Reference in New Issue