Reimplement (part of) the or -> add optimization. Matching 'or' into 'add'
is general goodness because it allows ORs to be converted to LEA to avoid
inserting copies.  However, this is bad because it makes the generated .s
file less obvious and gives valgrind heartburn (tons of false positives in
bitfield code).

While the general fix should be in valgrind, we can at least try to avoid
emitting ADD instructions that *don't* get promoted to LEA.  This is more
work because it requires introducing pseudo instructions to represent
"add that knows the bits are disjoint", but hey, people really love valgrind.

This fixes this testcase:
https://bugs.kde.org/show_bug.cgi?id=242137#c20

The add r/i cases are coming next.

llvm-svn: 116007
Chris Lattner, 2010-10-07 23:36:18 +00:00
commit 4fb38d3cd3 (parent 0f1677e190)
4 changed files with 126 additions and 65 deletions
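For illustration, a minimal C++ sketch of the kind of bitfield code this targets (my example, not from the patch): the or'ed bit ranges are disjoint, so the OR behaves exactly like an ADD, and the ADD form can be selected into a three-address LEA instead of a copy plus OR.

#include <cassert>
#include <cstdint>

// Packing two disjoint bitfields: no bit position is set in both operands,
// so (hi << 4) | lo == (hi << 4) + lo.  Selecting the ADD form lets the
// backend emit a three-address LEA (roughly "leal (%rdi,%rsi), %eax")
// instead of a register copy followed by an OR.
uint32_t pack(uint32_t hi, uint32_t lo) {
  assert(lo < 16);  // 'lo' occupies only the low nibble
  return (hi << 4) | lo;
}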

View File

@@ -997,6 +997,63 @@ def def32 : PatLeaf<(i32 GR32:$src), [{
def : Pat<(i64 (zext def32:$src)),
(SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;
//===----------------------------------------------------------------------===//
// Pattern match OR as ADD
//===----------------------------------------------------------------------===//
// If safe, we prefer to pattern match OR as ADD at isel time. ADD can be
// 3-addressified into an LEA instruction to avoid copies. However, we also
// want to finally emit these instructions as an or at the end of the code
// generator to make the generated code easier to read. To do this, we select
// into "disjoint bits" pseudo ops.
// Treat an 'or' node as an 'add' if the or'ed bits are known to be zero.
def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt KnownZero0, KnownOne0;
CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
APInt KnownZero1, KnownOne1;
CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
return (~KnownZero0 & ~KnownZero1) == 0;
}]>;
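A rough standalone restatement of the disjointness test above (my sketch, with fixed masks standing in for what ComputeMaskedBits derives; a set bit in a KnownZero mask means that operand bit is provably zero):

#include <cassert>
#include <cstdint>

// The operands can only overlap at bit positions where neither is known
// zero, so OR may be treated as ADD exactly when that set is empty.
bool orIsAdd(uint32_t KnownZero0, uint32_t KnownZero1) {
  return (~KnownZero0 & ~KnownZero1) == 0;
}

int main() {
  // lhs = (a & 0xF0), rhs = (b & 0x0F): disjoint, OR can become ADD.
  assert(orIsAdd(0xFFFFFF0Fu, 0xFFFFFFF0u));
  // Both operands may have bits 4-7 set: must stay an OR.
  assert(!orIsAdd(0xFFFFFF0Fu, 0xFFFFFF0Fu));
  return 0;
}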
// (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
let AddedComplexity = 5 in { // Try this before the selecting to OR
let isCommutable = 1, isConvertibleToThreeAddress = 1,
Constraints = "$src1 = $dst" in {
def ADD16rr_DB : I<0, Pseudo, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2),
"", // orw/addw REG, REG
[(set GR16:$dst, (or_is_add GR16:$src1, GR16:$src2))]>;
def ADD32rr_DB : I<0, Pseudo, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2),
"", // orl/addl REG, REG
[(set GR32:$dst, (or_is_add GR32:$src1, GR32:$src2))]>;
def ADD64rr_DB : I<0, Pseudo, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2),
"", // orq/addq REG, REG
[(set GR64:$dst, (or_is_add GR64:$src1, GR64:$src2))]>;
}
def : Pat<(or_is_add GR16:$src1, imm:$src2),
(ADD16ri GR16:$src1, imm:$src2)>;
def : Pat<(or_is_add GR32:$src1, imm:$src2),
(ADD32ri GR32:$src1, imm:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
(ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
def : Pat<(or_is_add GR16:$src1, i16immSExt8:$src2),
(ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
def : Pat<(or_is_add GR32:$src1, i32immSExt8:$src2),
(ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
(ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
} // AddedComplexity
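As a concrete case for the immediate patterns, consider tagged pointers (hypothetical example, not from the patch): the low bits of an aligned base are known zero, so or-ing in a small tag matches or_is_add and selects an ADD immediate form, which can later fold into an LEA displacement.

#include <cassert>
#include <cstdint>

// 'base' is 4-byte aligned, so its low two bits are known zero and
// base | tag == base + tag for any tag < 4.
uint64_t setTag(uint64_t base, uint64_t tag) {
  assert((base & 3) == 0 && tag < 4);
  return base | tag;  // selectable as an ADD, e.g. "leaq 1(%rdi), %rax"
}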
//===----------------------------------------------------------------------===//
// Some peepholes
//===----------------------------------------------------------------------===//
@@ -1309,27 +1366,8 @@ def : Pat<(i32 (anyext (i8 (X86setcc_c X86_COND_B, EFLAGS)))),
def : Pat<(i32 (anyext (i16 (X86setcc_c X86_COND_B, EFLAGS)))),
(SETB_C32r)>;
- // (or x1, x2) -> (add x1, x2) if two operands are known not to share bits.
- let AddedComplexity = 5 in { // Try this before the selecting to OR
- def : Pat<(or_is_add GR16:$src1, imm:$src2),
-           (ADD16ri GR16:$src1, imm:$src2)>;
- def : Pat<(or_is_add GR32:$src1, imm:$src2),
-           (ADD32ri GR32:$src1, imm:$src2)>;
- def : Pat<(or_is_add GR16:$src1, i16immSExt8:$src2),
-           (ADD16ri8 GR16:$src1, i16immSExt8:$src2)>;
- def : Pat<(or_is_add GR32:$src1, i32immSExt8:$src2),
-           (ADD32ri8 GR32:$src1, i32immSExt8:$src2)>;
- def : Pat<(or_is_add GR16:$src1, GR16:$src2),
-           (ADD16rr GR16:$src1, GR16:$src2)>;
- def : Pat<(or_is_add GR32:$src1, GR32:$src2),
-           (ADD32rr GR32:$src1, GR32:$src2)>;
- def : Pat<(or_is_add GR64:$src1, i64immSExt8:$src2),
-           (ADD64ri8 GR64:$src1, i64immSExt8:$src2)>;
- def : Pat<(or_is_add GR64:$src1, i64immSExt32:$src2),
-           (ADD64ri32 GR64:$src1, i64immSExt32:$src2)>;
- def : Pat<(or_is_add GR64:$src1, GR64:$src2),
-           (ADD64rr GR64:$src1, GR64:$src2)>;
- } // AddedComplexity
//===----------------------------------------------------------------------===//
// EFLAGS-defining Patterns

View File

@@ -55,6 +55,11 @@ ReMatPICStubLoad("remat-pic-stub-load",
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
: TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
TM(tm), RI(tm, *this) {
enum {
TB_NOT_REVERSABLE = 1U << 31,
TB_FLAGS = TB_NOT_REVERSABLE
};
static const unsigned OpTbl2Addr[][2] = {
{ X86::ADC32ri, X86::ADC32mi },
{ X86::ADC32ri8, X86::ADC32mi8 },
@@ -65,12 +70,15 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::ADD16ri, X86::ADD16mi },
{ X86::ADD16ri8, X86::ADD16mi8 },
{ X86::ADD16rr, X86::ADD16mr },
{ X86::ADD16rr_DB, X86::ADD16mr | TB_NOT_REVERSABLE },
{ X86::ADD32ri, X86::ADD32mi },
{ X86::ADD32ri8, X86::ADD32mi8 },
{ X86::ADD32rr, X86::ADD32mr },
{ X86::ADD32rr_DB, X86::ADD32mr | TB_NOT_REVERSABLE },
{ X86::ADD64ri32, X86::ADD64mi32 },
{ X86::ADD64ri8, X86::ADD64mi8 },
{ X86::ADD64rr, X86::ADD64mr },
{ X86::ADD64rr_DB, X86::ADD64mr | TB_NOT_REVERSABLE },
{ X86::ADD8ri, X86::ADD8mi },
{ X86::ADD8rr, X86::ADD8mr },
{ X86::AND16ri, X86::AND16mi },
@@ -215,16 +223,21 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
for (unsigned i = 0, e = array_lengthof(OpTbl2Addr); i != e; ++i) {
unsigned RegOp = OpTbl2Addr[i][0];
- unsigned MemOp = OpTbl2Addr[i][1];
- if (!RegOp2MemOpTable2Addr.insert(std::make_pair(RegOp,
-                                            std::make_pair(MemOp,0))).second)
-   assert(false && "Duplicated entries?");
+ unsigned MemOp = OpTbl2Addr[i][1] & ~TB_FLAGS;
+ assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?");
+ RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U);
+ // If this is not a reversable operation (because there is a many->one)
+ // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
+ if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE)
+   continue;
// Index 0, folded load and store, no alignment requirement.
unsigned AuxInfo = 0 | (1 << 4) | (1 << 5);
- if (!MemOp2RegOpTable.insert(std::make_pair(MemOp,
-                                             std::make_pair(RegOp,
-                                                            AuxInfo))).second)
-   assert(false && "Duplicated entries in unfolding maps?");
+ assert(!MemOp2RegOpTable.count(MemOp) &&
+        "Duplicated entries in unfolding maps?");
+ MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
}
// If the third value is 1, then it's folding either a load or a store.
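Why the high bit matters, in a reduced sketch (opcode values invented): ADD32rr and ADD32rr_DB now both fold to ADD32mr, so building the reverse memory-to-register map from every row would insert the same key twice. Rows tagged TB_NOT_REVERSABLE still get a folding entry but are skipped when the unfold map is built.

#include <cassert>
#include <map>

enum { TB_NOT_REVERSABLE = 1u << 31 };

int main() {
  // { reg-form opcode, mem-form opcode (possibly flagged) }; values invented.
  const unsigned Tbl[][2] = {
    { /*ADD32rr*/    10, /*ADD32mr*/ 20 },
    { /*ADD32rr_DB*/ 11, /*ADD32mr*/ 20 | TB_NOT_REVERSABLE },
  };
  std::map<unsigned, unsigned> RegToMem, MemToReg;
  for (const auto &Row : Tbl) {
    unsigned MemOp = Row[1] & ~TB_NOT_REVERSABLE;
    RegToMem[Row[0]] = MemOp;   // folding works for both rows
    if (Row[1] & TB_NOT_REVERSABLE)
      continue;                 // many->one mapping: no unique unfold
    assert(!MemToReg.count(MemOp) && "duplicate unfold entry");
    MemToReg[MemOp] = Row[0];   // ADD32mr unfolds only to plain ADD32rr
  }
  assert(MemToReg[20] == 10);
  return 0;
}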
@@ -455,8 +468,11 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
{ X86::ADC32rr, X86::ADC32rm, 0 },
{ X86::ADC64rr, X86::ADC64rm, 0 },
{ X86::ADD16rr, X86::ADD16rm, 0 },
{ X86::ADD16rr_DB, X86::ADD16rm | TB_NOT_REVERSABLE, 0 },
{ X86::ADD32rr, X86::ADD32rm, 0 },
{ X86::ADD32rr_DB, X86::ADD32rm | TB_NOT_REVERSABLE, 0 },
{ X86::ADD64rr, X86::ADD64rm, 0 },
{ X86::ADD64rr_DB, X86::ADD64rm | TB_NOT_REVERSABLE, 0 },
{ X86::ADD8rr, X86::ADD8rm, 0 },
{ X86::ADDPDrr, X86::ADDPDrm, 16 },
{ X86::ADDPSrr, X86::ADDPSrm, 16 },
@@ -651,16 +667,23 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
for (unsigned i = 0, e = array_lengthof(OpTbl2); i != e; ++i) {
unsigned RegOp = OpTbl2[i][0];
- unsigned MemOp = OpTbl2[i][1];
+ unsigned MemOp = OpTbl2[i][1] & ~TB_FLAGS;
unsigned Align = OpTbl2[i][2];
- if (!RegOp2MemOpTable2.insert(std::make_pair(RegOp,
-                                              std::make_pair(MemOp,Align))).second)
-   assert(false && "Duplicated entries?");
+ assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!");
+ RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align);
+ // If this is not a reversable operation (because there is a many->one)
+ // mapping, don't insert the reverse of the operation into MemOp2RegOpTable.
+ if (OpTbl2[i][1] & TB_NOT_REVERSABLE)
+   continue;
// Index 2, folded load
unsigned AuxInfo = 2 | (1 << 4);
- if (!MemOp2RegOpTable.insert(std::make_pair(MemOp,
-                                             std::make_pair(RegOp, AuxInfo))).second)
-   assert(false && "Duplicated entries in unfolding maps?");
+ assert(!MemOp2RegOpTable.count(MemOp) &&
+        "Duplicated entries in unfolding maps?");
+ MemOp2RegOpTable[MemOp] = std::make_pair(RegOp, AuxInfo);
}
}
@@ -1135,7 +1158,8 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
case X86::ADD16ri8:
addRegOffset(MIB, leaInReg, true, MI->getOperand(2).getImm());
break;
- case X86::ADD16rr: {
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB: {
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
unsigned leaInReg2 = 0;
@@ -1348,18 +1372,27 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
Src, isKill, -1);
break;
case X86::ADD64rr:
- case X86::ADD32rr: {
+ case X86::ADD64rr_DB:
+ case X86::ADD32rr:
+ case X86::ADD32rr_DB: {
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
- unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
-   : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
+ unsigned Opc;
+ TargetRegisterClass *RC;
+ if (MIOpc == X86::ADD64rr || MIOpc == X86::ADD64rr_DB) {
+   Opc = X86::LEA64r;
+   RC = X86::GR64_NOSPRegisterClass;
+ } else {
+   Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
+   RC = X86::GR32_NOSPRegisterClass;
+ }
unsigned Src2 = MI->getOperand(2).getReg();
bool isKill2 = MI->getOperand(2).isKill();
// LEA can't handle RSP.
if (TargetRegisterInfo::isVirtualRegister(Src2) &&
-     !MF.getRegInfo().constrainRegClass(Src2,
-                                MIOpc == X86::ADD64rr ? X86::GR64_NOSPRegisterClass :
-                                X86::GR32_NOSPRegisterClass))
+     !MF.getRegInfo().constrainRegClass(Src2, RC))
return 0;
NewMI = addRegReg(BuildMI(MF, MI->getDebugLoc(), get(Opc))
@@ -1370,7 +1403,8 @@ X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
LV->replaceKillInstruction(Src2, MI, NewMI);
break;
}
- case X86::ADD16rr: {
+ case X86::ADD16rr:
+ case X86::ADD16rr_DB: {
if (DisableLEA16)
return is64Bit ? convertToThreeAddressWithLEA(MIOpc, MFI, MBBI, LV) : 0;
assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
@@ -2598,13 +2632,8 @@ bool X86InstrInfo::canFoldMemoryOperand(const MachineInstr *MI,
OpcodeTablePtr = &RegOp2MemOpTable2;
}
- if (OpcodeTablePtr) {
-   // Find the Opcode to fuse
-   DenseMap<unsigned, std::pair<unsigned,unsigned> >::const_iterator I =
-     OpcodeTablePtr->find(Opc);
-   if (I != OpcodeTablePtr->end())
-     return true;
- }
+ if (OpcodeTablePtr && OpcodeTablePtr->count(Opc))
+   return true;
return TargetInstrInfoImpl::canFoldMemoryOperand(MI, Ops);
}

View File

@@ -544,20 +544,6 @@ def trunc_su : PatFrag<(ops node:$src), (trunc node:$src), [{
return N->hasOneUse();
}]>;
- // Treat an 'or' node is as an 'add' if the or'ed bits are known to be zero.
- def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{
-   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N->getOperand(1)))
-     return CurDAG->MaskedValueIsZero(N->getOperand(0), CN->getAPIntValue());
-   unsigned BitWidth = N->getValueType(0).getScalarType().getSizeInBits();
-   APInt Mask = APInt::getAllOnesValue(BitWidth);
-   APInt KnownZero0, KnownOne0;
-   CurDAG->ComputeMaskedBits(N->getOperand(0), Mask, KnownZero0, KnownOne0, 0);
-   APInt KnownZero1, KnownOne1;
-   CurDAG->ComputeMaskedBits(N->getOperand(1), Mask, KnownZero1, KnownOne1, 0);
-   return (~KnownZero0 & ~KnownZero1) == 0;
- }]>;
//===----------------------------------------------------------------------===//
// Instruction list.
//

View File

@@ -347,6 +347,7 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
}
// Handle a few special cases to eliminate operand modifiers.
ReSimplify:
switch (OutMI.getOpcode()) {
case X86::LEA64_32r: // Handle 'subreg rewriting' for the lea64_32mem operand.
lower_lea64_32mem(&OutMI, 1);
@@ -433,6 +434,13 @@ void X86MCInstLower::Lower(const MachineInstr *MI, MCInst &OutMI) const {
break;
}
// These are pseudo-ops for OR to help with the OR->ADD transformation. We do
// this with an ugly goto in case the resultant OR uses EAX and needs the
// short form.
case X86::ADD16rr_DB: OutMI.setOpcode(X86::OR16rr); goto ReSimplify;
case X86::ADD32rr_DB: OutMI.setOpcode(X86::OR32rr); goto ReSimplify;
case X86::ADD64rr_DB: OutMI.setOpcode(X86::OR64rr); goto ReSimplify;
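A toy sketch of the ReSimplify idiom (opcodes and the short-form rule invented for illustration): rewrite the opcode, then jump back to the top of the switch, so the freshly produced OR flows through any later simplifications, such as a shorter encoding when EAX is involved.

#include <cstdio>

enum Opc { ADD32rr_DB, OR32rr, OR32rr_SHORT };  // hypothetical opcodes

void lower(Opc Op, bool UsesEAX) {
ReSimplify:
  switch (Op) {
  case ADD32rr_DB:      // pseudo-op: rewrite to a real OR...
    Op = OR32rr;
    goto ReSimplify;    // ...and re-enter the switch with the new opcode
  case OR32rr:
    if (UsesEAX) { Op = OR32rr_SHORT; goto ReSimplify; }
    std::puts("orl %reg, %reg");
    break;
  case OR32rr_SHORT:
    std::puts("or, short form");
    break;
  }
}

int main() { lower(ADD32rr_DB, true); return 0; }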
// The assembler backend wants to see branches in their small form and relax
// them to their large form. The JIT can only handle the large form because
// it does not do relaxation. For now, translate the large form to the