[ARM] Remove redundant computeKnownBits helper.
Move the BFI logic to computeKnownBitsForTargetNode, and delete the redundant CMOV logic. This is intended as a cleanup, but it's probably possible to construct a case where moving the BFI logic allows more combines.

Differential Revision: https://reviews.llvm.org/D31795

llvm-svn: 300752
commit 70ad2751d5
parent 75ad9ccbfa
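As background for the diff below: the known-bits handling for ARMISD::BFI simply forgets everything about the bit positions being overwritten, because the mask operand (as the in-tree comment puts it, "already a mask suitable for removing the bits it sets") has zeros exactly at those positions. A minimal standalone sketch of that idea, with plain uint32_t standing in for APInt; the names and values are illustrative, not LLVM code:

// Sketch only: uint32_t stands in for APInt; InvMask plays the role of the
// BFI mask operand, which has zeros at the inserted bit positions.
#include <cassert>
#include <cstdint>

struct Known {
  uint32_t Zero; // bits known to be 0
  uint32_t One;  // bits known to be 1
};

// Conservative known bits for a bitfield insert: keep what is known about the
// base operand, but drop all knowledge of the bits being overwritten.
Known knownBitsForBFI(Known Base, uint32_t InvMask) {
  Base.Zero &= InvMask;
  Base.One &= InvMask;
  return Base;
}

int main() {
  Known Base{0x000000A5u, 0x0000005Au}; // low byte known to be 0x5A
  uint32_t InvMask = ~0x0000000Fu;      // bits [3:0] are being inserted
  Known K = knownBitsForBFI(Base, InvMask);
  assert((K.Zero & 0x0Fu) == 0 && (K.One & 0x0Fu) == 0); // inserted bits now unknown
  assert((K.One & 0xF0u) == 0x50u);                      // untouched bits still known
}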
@@ -11694,34 +11694,6 @@ static SDValue PerformExtendCombine(SDNode *N, SelectionDAG &DAG,
   return SDValue();
 }
 
-static void computeKnownBits(SelectionDAG &DAG, SDValue Op, APInt &KnownZero,
-                             APInt &KnownOne) {
-  if (Op.getOpcode() == ARMISD::BFI) {
-    // Conservatively, we can recurse down the first operand
-    // and just mask out all affected bits.
-    computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);
-
-    // The operand to BFI is already a mask suitable for removing the bits it
-    // sets.
-    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
-    const APInt &Mask = CI->getAPIntValue();
-    KnownZero &= Mask;
-    KnownOne &= Mask;
-    return;
-  }
-  if (Op.getOpcode() == ARMISD::CMOV) {
-    APInt KZ2(KnownZero.getBitWidth(), 0);
-    APInt KO2(KnownOne.getBitWidth(), 0);
-    computeKnownBits(DAG, Op.getOperand(0), KnownZero, KnownOne);
-    computeKnownBits(DAG, Op.getOperand(1), KZ2, KO2);
-
-    KnownZero &= KZ2;
-    KnownOne &= KO2;
-    return;
-  }
-  return DAG.computeKnownBits(Op, KnownZero, KnownOne);
-}
-
 SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &DAG) const {
   // If we have a CMOV, OR and AND combination such as:
   //   if (x & CN)
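The removed CMOV branch above intersected the known bits of both operands: a conditional move may yield either value, so only bits known (and agreeing) on both sides remain known. A small self-contained sketch of that intersection, again with uint32_t standing in for APInt and purely illustrative values:

// Sketch only: uint32_t stands in for APInt.
#include <cassert>
#include <cstdint>

struct Known {
  uint32_t Zero; // bits known to be 0
  uint32_t One;  // bits known to be 1
};

// A conditional move may produce either operand, so only bits known in both
// operands stay known afterwards.
Known knownBitsForCMOV(Known A, Known B) {
  return Known{A.Zero & B.Zero, A.One & B.One};
}

int main() {
  Known A{0xFFFFFF00u, 0x000000FFu}; // operand known to be 0x000000FF
  Known B{0xFFFFFFF0u, 0x0000000Fu}; // operand known to be 0x0000000F
  Known K = knownBitsForCMOV(A, B);
  assert(K.Zero == 0xFFFFFF00u); // upper 24 bits are zero either way
  assert(K.One == 0x0000000Fu);  // low nibble is one either way; bits [7:4] unknown
}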
@@ -11783,7 +11755,7 @@ SDValue ARMTargetLowering::PerformCMOVToBFICombine(SDNode *CMOV, SelectionDAG &D
   // Lastly, can we determine that the bits defined by OrCI
   // are zero in Y?
   APInt KnownZero, KnownOne;
-  computeKnownBits(DAG, Y, KnownZero, KnownOne);
+  DAG.computeKnownBits(Y, KnownZero, KnownOne);
   if ((OrCI & KnownZero) != OrCI)
     return SDValue();
 
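The call-site change above feeds the same subset test as before: the combine may only proceed if every bit set in OrCI is already known to be zero in Y. A tiny standalone illustration of that test, using plain integers and hypothetical values:

// Sketch only: plain integers, hypothetical values.
#include <cassert>
#include <cstdint>

int main() {
  uint32_t OrCI = 0x0000000Fu;       // bits the combine wants to define
  uint32_t KnownZeroY = 0xFFFF00FFu; // bits known to be zero in Y

  // Safe only if OrCI is a subset of the bits known to be zero in Y.
  assert((OrCI & KnownZeroY) == OrCI);

  uint32_t KnownZeroY2 = 0xFFFF00F0u; // bits [3:0] not all known zero
  assert(((OrCI & KnownZeroY2) == OrCI) == false); // combine must bail out
}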
@@ -12663,6 +12635,19 @@ void ARMTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     }
     }
   }
+  case ARMISD::BFI: {
+    // Conservatively, we can recurse down the first operand
+    // and just mask out all affected bits.
+    DAG.computeKnownBits(Op.getOperand(0), KnownZero, KnownOne, Depth + 1);
+
+    // The operand to BFI is already a mask suitable for removing the bits it
+    // sets.
+    ConstantSDNode *CI = cast<ConstantSDNode>(Op.getOperand(2));
+    const APInt &Mask = CI->getAPIntValue();
+    KnownZero &= Mask;
+    KnownOne &= Mask;
+    return;
+  }
   }
 }
 