[CodeGen] Prefer "if (SDValue R = ...)" to "if (R.getNode())". NFCI.

llvm-svn: 260316
Author: Ahmed Bougacha
Date:   2016-02-09 22:54:12 +00:00
parent 244cd98474
commit f8dfb47c02
14 changed files with 98 additions and 167 deletions
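
The change applies one mechanical pattern throughout: instead of materializing
the result of a combine/lowering helper into a named SDValue and then testing
Result.getNode(), the value is declared inside the if condition, which makes
the null check implicit and limits the variable's scope to the branch that
uses it. A minimal before/after sketch follows; it assumes LLVM's SDValue from
llvm/CodeGen/SelectionDAGNodes.h (whose boolean conversion tests
getNode() != nullptr), and visitFoo/tryCombineFoo are placeholder names for
illustration, not functions touched by this patch.

    // Sketch only; not code from the patch.
    #include "llvm/CodeGen/SelectionDAGNodes.h"
    using llvm::SDNode;
    using llvm::SDValue;

    // Stand-in for any helper that returns an empty SDValue on failure
    // (MatchBSwapHWordLow, TLI.LowerOperation, tryCombineToEXTR, ...).
    static SDValue tryCombineFoo(SDNode *) { return SDValue(); }

    // Before: name the value, then test it explicitly.
    static SDValue visitFooOld(SDNode *N) {
      SDValue Combined = tryCombineFoo(N);
      if (Combined.getNode())
        return Combined;
      return SDValue();
    }

    // After: declare in the condition; the null test is implicit and
    // Combined is only in scope where it is known to be non-null.
    static SDValue visitFooNew(SDNode *N) {
      if (SDValue Combined = tryCombineFoo(N))
        return Combined;
      return SDValue();
    }

The MipsSE shuffle lowering below keeps a single SDValue Result and assigns it
inside each condition instead, as if ((Result = lowerVECTOR_SHUFFLE_ILVEV(...)));
the extra parentheses are the conventional way to mark an intentional
assignment-in-condition and keep the compiler warning quiet.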


@@ -3293,9 +3293,8 @@ SDValue DAGCombiner::visitAND(SDNode *N) {
   }
   // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
   if (N1C && N1C->getAPIntValue() == 0xffff && N0.getOpcode() == ISD::OR) {
-    SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
-                                       N0.getOperand(1), false);
-    if (BSwap.getNode())
+    if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
+                                           N0.getOperand(1), false))
       return BSwap;
   }
@@ -4332,8 +4331,8 @@ SDValue DAGCombiner::visitRotate(SDNode *N) {
   // fold (rot* x, (trunc (and y, c))) -> (rot* x, (and (trunc y), (trunc c))).
   if (N->getOperand(1).getOpcode() == ISD::TRUNCATE &&
       N->getOperand(1).getOperand(0).getOpcode() == ISD::AND) {
-    SDValue NewOp1 = distributeTruncateThroughAnd(N->getOperand(1).getNode());
-    if (NewOp1.getNode())
+    if (SDValue NewOp1 =
+            distributeTruncateThroughAnd(N->getOperand(1).getNode()))
       return DAG.getNode(N->getOpcode(), SDLoc(N), N->getValueType(0),
                          N->getOperand(0), NewOp1);
   }
@@ -4397,8 +4396,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
   // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
   if (N1.getOpcode() == ISD::TRUNCATE &&
       N1.getOperand(0).getOpcode() == ISD::AND) {
-    SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
-    if (NewOp1.getNode())
+    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
       return DAG.getNode(ISD::SHL, SDLoc(N), VT, N0, NewOp1);
   }
@@ -4654,8 +4652,7 @@ SDValue DAGCombiner::visitSRA(SDNode *N) {
   // fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
   if (N1.getOpcode() == ISD::TRUNCATE &&
       N1.getOperand(0).getOpcode() == ISD::AND) {
-    SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode());
-    if (NewOp1.getNode())
+    if (SDValue NewOp1 = distributeTruncateThroughAnd(N1.getNode()))
       return DAG.getNode(ISD::SRA, SDLoc(N), VT, N0, NewOp1);
   }
@@ -5656,9 +5653,8 @@ SDValue DAGCombiner::visitSELECT_CC(SDNode *N) {
     return N2;
   // Determine if the condition we're dealing with is constant
-  SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()),
-                              N0, N1, CC, SDLoc(N), false);
-  if (SCC.getNode()) {
+  if (SDValue SCC = SimplifySetCC(getSetCCResultType(N0.getValueType()), N0, N1,
+                                  CC, SDLoc(N), false)) {
     AddToWorklist(SCC.getNode());
     if (ConstantSDNode *SCCC = dyn_cast<ConstantSDNode>(SCC.getNode())) {
@@ -6140,11 +6136,11 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
       SDLoc DL(N);
       SDValue NegOne =
           DAG.getConstant(APInt::getAllOnesValue(ElementWidth), DL, VT);
-      SDValue SCC =
-        SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1),
-                         NegOne, DAG.getConstant(0, DL, VT),
-                         cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
-      if (SCC.getNode()) return SCC;
+      if (SDValue SCC = SimplifySelectCC(
+              DL, N0.getOperand(0), N0.getOperand(1), NegOne,
+              DAG.getConstant(0, DL, VT),
+              cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
+        return SCC;
       if (!VT.isVector()) {
         EVT SetCCVT = getSetCCResultType(N0.getOperand(0).getValueType());
@@ -6471,11 +6467,11 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
     // zext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
     SDLoc DL(N);
-    SDValue SCC =
-      SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1),
-                       DAG.getConstant(1, DL, VT), DAG.getConstant(0, DL, VT),
-                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
-    if (SCC.getNode()) return SCC;
+    if (SDValue SCC = SimplifySelectCC(
+            DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
+            DAG.getConstant(0, DL, VT),
+            cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
+      return SCC;
   }
   // (zext (shl (zext x), cst)) -> (shl (zext x), cst)
@@ -6650,11 +6646,10 @@ SDValue DAGCombiner::visitANY_EXTEND(SDNode *N) {
     // aext(setcc x,y,cc) -> select_cc x, y, 1, 0, cc
     SDLoc DL(N);
-    SDValue SCC =
-      SimplifySelectCC(DL, N0.getOperand(0), N0.getOperand(1),
-                       DAG.getConstant(1, DL, VT), DAG.getConstant(0, DL, VT),
-                       cast<CondCodeSDNode>(N0.getOperand(2))->get(), true);
-    if (SCC.getNode())
+    if (SDValue SCC = SimplifySelectCC(
+            DL, N0.getOperand(0), N0.getOperand(1), DAG.getConstant(1, DL, VT),
+            DAG.getConstant(0, DL, VT),
+            cast<CondCodeSDNode>(N0.getOperand(2))->get(), true))
       return SCC;
   }
@@ -6978,9 +6973,8 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
   // Form (sext_inreg (bswap >> 16)) or (sext_inreg (rotl (bswap) 16))
   if (EVTBits <= 16 && N0.getOpcode() == ISD::OR) {
-    SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
-                                       N0.getOperand(1), false);
-    if (BSwap.getNode())
+    if (SDValue BSwap = MatchBSwapHWordLow(N0.getNode(), N0.getOperand(0),
+                                           N0.getOperand(1), false))
       return DAG.getNode(ISD::SIGN_EXTEND_INREG, SDLoc(N), VT,
                          BSwap, N1);
   }
@@ -7117,10 +7111,9 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
   // Currently we only perform this optimization on scalars because vectors
   // may have different active low bits.
   if (!VT.isVector()) {
-    SDValue Shorter =
-      GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
-                                               VT.getSizeInBits()));
-    if (Shorter.getNode())
+    if (SDValue Shorter =
+            GetDemandedBits(N0, APInt::getLowBitsSet(N0.getValueSizeInBits(),
+                                                     VT.getSizeInBits())))
       return DAG.getNode(ISD::TRUNCATE, SDLoc(N), VT, Shorter);
   }
   // fold (truncate (load x)) -> (smaller load x)
@@ -13318,9 +13311,7 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
       (N1.getOpcode() == ISD::UNDEF ||
        (N1.getOpcode() == ISD::CONCAT_VECTORS &&
        N0.getOperand(0).getValueType() == N1.getOperand(0).getValueType()))) {
-    SDValue V = partitionShuffleOfConcats(N, DAG);
-    if (V.getNode())
+    if (SDValue V = partitionShuffleOfConcats(N, DAG))
       return V;
   }


@@ -922,8 +922,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       break;
     }
     case TargetLowering::Custom: {
-      SDValue Res = TLI.LowerOperation(RVal, DAG);
-      if (Res.getNode()) {
+      if (SDValue Res = TLI.LowerOperation(RVal, DAG)) {
         RVal = Res;
         RChain = Res.getValue(1);
       }
@@ -1099,8 +1098,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
       Chain = SDValue(Node, 1);
       if (isCustom) {
-        SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
-        if (Res.getNode()) {
+        if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
          Value = Res;
          Chain = Res.getValue(1);
        }
@@ -1399,8 +1397,7 @@ void SelectionDAGLegalize::LegalizeOp(SDNode *Node) {
   case TargetLowering::Custom: {
     // FIXME: The handling for custom lowering with multiple results is
     // a complete mess.
-    SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG);
-    if (Res.getNode()) {
+    if (SDValue Res = TLI.LowerOperation(SDValue(Node, 0), DAG)) {
       if (!(Res.getNode() != Node || Res.getResNo() != 0))
         return;


@@ -358,8 +358,7 @@ SDValue VectorLegalizer::LegalizeOp(SDValue Op) {
   case TargetLowering::Legal:
     break;
   case TargetLowering::Custom: {
-    SDValue Tmp1 = TLI.LowerOperation(Op, DAG);
-    if (Tmp1.getNode()) {
+    if (SDValue Tmp1 = TLI.LowerOperation(Op, DAG)) {
       Result = Tmp1;
       break;
     }


@@ -993,10 +993,8 @@ SDValue SelectionDAGBuilder::getValue(const Value *V) {
   // If there's a virtual register allocated and initialized for this
   // value, use it.
-  SDValue copyFromReg = getCopyFromRegs(V, V->getType());
-  if (copyFromReg.getNode()) {
+  if (SDValue copyFromReg = getCopyFromRegs(V, V->getType()))
     return copyFromReg;
-  }
   // Otherwise create a new SDValue and remember it.
   SDValue Val = getValueImpl(V);
@@ -7333,8 +7331,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
 void TargetLowering::LowerOperationWrapper(SDNode *N,
                                            SmallVectorImpl<SDValue> &Results,
                                            SelectionDAG &DAG) const {
-  SDValue Res = LowerOperation(SDValue(N, 0), DAG);
-  if (Res.getNode())
+  if (SDValue Res = LowerOperation(SDValue(N, 0), DAG))
     Results.push_back(Res);
 }


@@ -5679,8 +5679,7 @@ SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
       return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
   }
-  SDValue Concat = tryFormConcatFromShuffle(Op, DAG);
-  if (Concat.getNode())
+  if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG))
     return Concat;
   bool DstIsLeft;
@@ -5952,8 +5951,7 @@ SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op,
                                              SelectionDAG &DAG) const {
   // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2))
   if (EnableAArch64SlrGeneration) {
-    SDValue Res = tryLowerToSLI(Op.getNode(), DAG);
-    if (Res.getNode())
+    if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG))
       return Res;
   }
@@ -7908,12 +7906,10 @@ static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
     return SDValue();
-  SDValue Res = tryCombineToEXTR(N, DCI);
-  if (Res.getNode())
+  if (SDValue Res = tryCombineToEXTR(N, DCI))
     return Res;
-  Res = tryCombineToBSL(N, DCI);
-  if (Res.getNode())
+  if (SDValue Res = tryCombineToBSL(N, DCI))
     return Res;
   return SDValue();
@@ -8873,8 +8869,7 @@ static SDValue performSTORECombine(SDNode *N,
                                    TargetLowering::DAGCombinerInfo &DCI,
                                    SelectionDAG &DAG,
                                    const AArch64Subtarget *Subtarget) {
-  SDValue Split = split16BStores(N, DCI, DAG, Subtarget);
-  if (Split.getNode())
+  if (SDValue Split = split16BStores(N, DCI, DAG, Subtarget))
     return Split;
   if (Subtarget->supportsAddressTopByteIgnored() &&
@@ -9540,8 +9535,7 @@ SDValue performCONDCombine(SDNode *N,
 static SDValue performBRCONDCombine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     SelectionDAG &DAG) {
-  SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3);
-  if (NV.getNode())
+  if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
     N = NV.getNode();
   SDValue Chain = N->getOperand(0);
   SDValue Dest = N->getOperand(1);


@@ -684,8 +684,7 @@ void AMDGPUTargetLowering::ReplaceNodeResults(SDNode *N,
     return;
   }
   case ISD::STORE: {
-    SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG);
-    if (Lowered.getNode())
+    if (SDValue Lowered = LowerSTORE(SDValue(N, 0), DAG))
       Results.push_back(Lowered);
     return;
   }
@@ -1386,10 +1385,8 @@ SDValue AMDGPUTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
 SDValue AMDGPUTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   SDLoc DL(Op);
-  SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG);
-  if (Result.getNode()) {
+  if (SDValue Result = AMDGPUTargetLowering::MergeVectorStore(Op, DAG))
     return Result;
-  }
   StoreSDNode *Store = cast<StoreSDNode>(Op);
   SDValue Chain = Store->getChain();


@@ -1271,10 +1271,8 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   SDValue Value = Op.getOperand(1);
   SDValue Ptr = Op.getOperand(2);
-  SDValue Result = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
-  if (Result.getNode()) {
+  if (SDValue Result = AMDGPUTargetLowering::LowerSTORE(Op, DAG))
     return Result;
-  }
   if (StoreNode->getAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS) {
     if (StoreNode->isTruncatingStore()) {
@@ -1328,16 +1326,13 @@ SDValue R600TargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   EVT ValueVT = Value.getValueType();
-  if (StoreNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS) {
+  if (StoreNode->getAddressSpace() != AMDGPUAS::PRIVATE_ADDRESS)
     return SDValue();
-  }
-  SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
-  if (Ret.getNode()) {
+  if (SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG))
     return Ret;
-  }
   // Lowering for indirect addressing
   // Lowering for indirect addressing
   const MachineFunction &MF = DAG.getMachineFunction();
   const AMDGPUFrameLowering *TFL =
       static_cast<const AMDGPUFrameLowering *>(Subtarget->getFrameLowering());
@@ -1906,8 +1901,7 @@ SDValue R600TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::SELECT_CC: {
     // Try common optimizations
-    SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI);
-    if (Ret.getNode())
+    if (SDValue Ret = AMDGPUTargetLowering::PerformDAGCombine(N, DCI))
      return Ret;
    // fold selectcc (selectcc x, y, a, b, cc), b, a, b, seteq ->


@@ -1703,8 +1703,7 @@ SDValue SITargetLowering::LowerFastFDIV(SDValue Op, SelectionDAG &DAG) const {
 }
 SDValue SITargetLowering::LowerFDIV32(SDValue Op, SelectionDAG &DAG) const {
-  SDValue FastLowered = LowerFastFDIV(Op, DAG);
-  if (FastLowered.getNode())
+  if (SDValue FastLowered = LowerFastFDIV(Op, DAG))
     return FastLowered;
   // This uses v_rcp_f32 which does not handle denormals. Let this hit a
@@ -1835,8 +1834,7 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
     return SDValue();
   }
-  SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG);
-  if (Ret.getNode())
+  if (SDValue Ret = AMDGPUTargetLowering::LowerSTORE(Op, DAG))
     return Ret;
   if (VT.isVector() && VT.getVectorNumElements() >= 8)


@@ -3963,8 +3963,7 @@ SDValue ARMTargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
   if (getTargetMachine().Options.UnsafeFPMath &&
       (CC == ISD::SETEQ || CC == ISD::SETOEQ ||
       CC == ISD::SETNE || CC == ISD::SETUNE)) {
-    SDValue Result = OptimizeVFPBrcond(Op, DAG);
-    if (Result.getNode())
+    if (SDValue Result = OptimizeVFPBrcond(Op, DAG))
      return Result;
   }
@@ -6230,11 +6229,9 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
   if ((VT == MVT::v8i16 || VT == MVT::v16i8) && isReverseMask(ShuffleMask, VT))
     return LowerReverse_VECTOR_SHUFFLEv16i8_v8i16(Op, DAG);
-  if (VT == MVT::v8i8) {
-    SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG);
-    if (NewOp.getNode())
+  if (VT == MVT::v8i8)
+    if (SDValue NewOp = LowerVECTOR_SHUFFLEv8i8(Op, ShuffleMask, DAG))
       return NewOp;
-  }
   return SDValue();
 }
@@ -8497,16 +8494,12 @@ SDValue combineSelectAndUseCommutative(SDNode *N, bool AllOnes,
                                        TargetLowering::DAGCombinerInfo &DCI) {
   SDValue N0 = N->getOperand(0);
   SDValue N1 = N->getOperand(1);
-  if (N0.getNode()->hasOneUse()) {
-    SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes);
-    if (Result.getNode())
+  if (N0.getNode()->hasOneUse())
+    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI, AllOnes))
      return Result;
-  }
-  if (N1.getNode()->hasOneUse()) {
-    SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes);
-    if (Result.getNode())
+  if (N1.getNode()->hasOneUse())
+    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI, AllOnes))
      return Result;
-  }
   return SDValue();
 }
@@ -8765,15 +8758,13 @@ static SDValue PerformADDCombineWithOperands(SDNode *N, SDValue N0, SDValue N1,
                                              const ARMSubtarget *Subtarget){
   // Attempt to create vpaddl for this add.
-  SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget);
-  if (Result.getNode())
+  if (SDValue Result = AddCombineToVPADDL(N, N0, N1, DCI, Subtarget))
    return Result;
   // fold (add (select cc, 0, c), x) -> (select cc, x, (add, x, c))
-  if (N0.getNode()->hasOneUse()) {
-    SDValue Result = combineSelectAndUse(N, N0, N1, DCI);
-    if (Result.getNode()) return Result;
-  }
+  if (N0.getNode()->hasOneUse())
+    if (SDValue Result = combineSelectAndUse(N, N0, N1, DCI))
+      return Result;
   return SDValue();
 }
@@ -8786,8 +8777,7 @@ static SDValue PerformADDCombine(SDNode *N,
   SDValue N1 = N->getOperand(1);
   // First try with the default operand order.
-  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget);
-  if (Result.getNode())
+  if (SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget))
    return Result;
   // If that didn't work, try again with the operands commuted.
@@ -8802,10 +8792,9 @@ static SDValue PerformSUBCombine(SDNode *N,
   SDValue N1 = N->getOperand(1);
   // fold (sub x, (select cc, 0, c)) -> (select cc, x, (sub, x, c))
-  if (N1.getNode()->hasOneUse()) {
-    SDValue Result = combineSelectAndUse(N, N1, N0, DCI);
-    if (Result.getNode()) return Result;
-  }
+  if (N1.getNode()->hasOneUse())
+    if (SDValue Result = combineSelectAndUse(N, N1, N0, DCI))
+      return Result;
   return SDValue();
 }
@@ -8975,8 +8964,7 @@ static SDValue PerformANDCombine(SDNode *N,
   if (!Subtarget->isThumb1Only()) {
     // fold (and (select cc, -1, c), x) -> (select cc, x, (and, x, c))
-    SDValue Result = combineSelectAndUseCommutative(N, true, DCI);
-    if (Result.getNode())
+    if (SDValue Result = combineSelectAndUseCommutative(N, true, DCI))
      return Result;
   }
@@ -9018,8 +9006,7 @@ static SDValue PerformORCombine(SDNode *N,
   if (!Subtarget->isThumb1Only()) {
     // fold (or (select cc, 0, c), x) -> (select cc, x, (or, x, c))
-    SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
-    if (Result.getNode())
+    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
   }
@@ -9192,8 +9179,7 @@ static SDValue PerformXORCombine(SDNode *N,
   if (!Subtarget->isThumb1Only()) {
    // fold (xor (select cc, 0, c), x) -> (select cc, x, (xor, x, c))
-    SDValue Result = combineSelectAndUseCommutative(N, false, DCI);
-    if (Result.getNode())
+    if (SDValue Result = combineSelectAndUseCommutative(N, false, DCI))
      return Result;
   }
@@ -9419,11 +9405,9 @@ static SDValue PerformBUILD_VECTORCombine(SDNode *N,
   // into a pair of GPRs, which is fine when the value is used as a scalar,
   // but if the i64 value is converted to a vector, we need to undo the VMOVRRD.
   SelectionDAG &DAG = DCI.DAG;
-  if (N->getNumOperands() == 2) {
-    SDValue RV = PerformVMOVDRRCombine(N, DAG);
-    if (RV.getNode())
+  if (N->getNumOperands() == 2)
+    if (SDValue RV = PerformVMOVDRRCombine(N, DAG))
      return RV;
-  }
   // Load i64 elements as f64 values so that type legalization does not split
   // them up into i32 values.


@@ -2883,26 +2883,20 @@ SDValue MipsSETargetLowering::lowerVECTOR_SHUFFLE(SDValue Op,
   // MipsISD::VSHF.
   if (isVECTOR_SHUFFLE_SPLATI(Op, ResTy, Indices, DAG))
     return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
-  SDValue Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  SDValue Result;
+  if ((Result = lowerVECTOR_SHUFFLE_ILVEV(Op, ResTy, Indices, DAG)))
     return Result;
-  Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  if ((Result = lowerVECTOR_SHUFFLE_ILVOD(Op, ResTy, Indices, DAG)))
     return Result;
-  Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  if ((Result = lowerVECTOR_SHUFFLE_ILVL(Op, ResTy, Indices, DAG)))
    return Result;
-  Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  if ((Result = lowerVECTOR_SHUFFLE_ILVR(Op, ResTy, Indices, DAG)))
    return Result;
-  Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  if ((Result = lowerVECTOR_SHUFFLE_PCKEV(Op, ResTy, Indices, DAG)))
    return Result;
-  Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  if ((Result = lowerVECTOR_SHUFFLE_PCKOD(Op, ResTy, Indices, DAG)))
    return Result;
-  Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG);
-  if (Result.getNode())
+  if ((Result = lowerVECTOR_SHUFFLE_SHF(Op, ResTy, Indices, DAG)))
    return Result;
   return lowerVECTOR_SHUFFLE_VSHF(Op, ResTy, Indices, DAG);
 }


@@ -3944,9 +3944,8 @@ static SDValue PerformADDCombine(SDNode *N,
   SDValue N1 = N->getOperand(1);
   // First try with the default operand order.
-  SDValue Result = PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget,
-                                                 OptLevel);
-  if (Result.getNode())
+  if (SDValue Result =
+          PerformADDCombineWithOperands(N, N0, N1, DCI, Subtarget, OptLevel))
    return Result;
   // If that didn't work, try again with the operands commuted.
@@ -4234,8 +4233,7 @@ static SDValue PerformMULCombine(SDNode *N,
                                  CodeGenOpt::Level OptLevel) {
   if (OptLevel > 0) {
     // Try mul.wide combining at OptLevel > 0
-    SDValue Ret = TryMULWIDECombine(N, DCI);
-    if (Ret.getNode())
+    if (SDValue Ret = TryMULWIDECombine(N, DCI))
      return Ret;
   }
@@ -4248,8 +4246,7 @@ static SDValue PerformSHLCombine(SDNode *N,
                                  CodeGenOpt::Level OptLevel) {
   if (OptLevel > 0) {
     // Try mul.wide combining at OptLevel > 0
-    SDValue Ret = TryMULWIDECombine(N, DCI);
-    if (Ret.getNode())
+    if (SDValue Ret = TryMULWIDECombine(N, DCI))
      return Ret;
   }


@@ -4120,8 +4120,7 @@ SDValue SystemZTargetLowering::lowerBUILD_VECTOR(SDValue Op,
   }
   // See if we should use shuffles to construct the vector from other vectors.
-  SDValue Res = tryBuildVectorShuffle(DAG, BVN);
-  if (Res.getNode())
+  if (SDValue Res = tryBuildVectorShuffle(DAG, BVN))
    return Res;
   // Detect SCALAR_TO_VECTOR conversions.
@@ -4745,9 +4744,8 @@ SDValue SystemZTargetLowering::PerformDAGCombine(SDNode *N,
     auto *SN = cast<StoreSDNode>(N);
     EVT MemVT = SN->getMemoryVT();
     if (MemVT.isInteger()) {
-      SDValue Value = combineTruncateExtract(SDLoc(N), MemVT,
-                                             SN->getValue(), DCI);
-      if (Value.getNode()) {
+      if (SDValue Value =
+              combineTruncateExtract(SDLoc(N), MemVT, SN->getValue(), DCI)) {
        DCI.AddToWorklist(Value.getNode());
        // Rewrite the store with the new form of stored value.


@@ -14352,8 +14352,7 @@ SDValue X86TargetLowering::EmitTest(SDValue Op, unsigned X86CC, SDLoc dl,
   case ISD::AND: Opcode = X86ISD::AND; break;
   case ISD::OR: {
     if (!NeedTruncation && (X86CC == X86::COND_E || X86CC == X86::COND_NE)) {
-      SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG);
-      if (EFLAGS.getNode())
+      if (SDValue EFLAGS = LowerVectorAllZeroTest(Op, Subtarget, DAG))
        return EFLAGS;
    }
    Opcode = X86ISD::OR;
@@ -14992,8 +14991,7 @@ static SDValue LowerVSETCC(SDValue Op, const X86Subtarget &Subtarget,
       // Only do this pre-AVX since vpcmp* is no longer destructive.
       if (Subtarget.hasAVX())
         break;
-      SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG);
-      if (ULEOp1.getNode()) {
+      if (SDValue ULEOp1 = ChangeVSETULTtoVSETULE(dl, Op1, DAG)) {
        Op1 = ULEOp1;
        Subus = true; Invert = false; Swap = false;
      }
@@ -15337,8 +15335,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
   }
   if (Cond.getOpcode() == ISD::SETCC) {
-    SDValue NewCond = LowerSETCC(Cond, DAG);
-    if (NewCond.getNode())
+    if (SDValue NewCond = LowerSETCC(Cond, DAG))
      Cond = NewCond;
   }
@@ -15917,8 +15914,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
         Inverted = true;
         Cond = Cond.getOperand(0);
       } else {
-        SDValue NewCond = LowerSETCC(Cond, DAG);
-        if (NewCond.getNode())
+        if (SDValue NewCond = LowerSETCC(Cond, DAG))
          Cond = NewCond;
      }
    }
@@ -24363,9 +24359,8 @@ static SDValue PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
     return LD;
   if (isTargetShuffle(N->getOpcode())) {
-    SDValue Shuffle =
-        PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget);
-    if (Shuffle.getNode())
+    if (SDValue Shuffle =
+            PerformTargetShuffleCombine(SDValue(N, 0), DAG, DCI, Subtarget))
      return Shuffle;
    // Try recursively combining arbitrary sequences of x86 shuffle
@@ -25155,8 +25150,8 @@ static SDValue PerformSELECTCombine(SDNode *N, SelectionDAG &DAG,
   if ((N->getOpcode() == ISD::VSELECT ||
        N->getOpcode() == X86ISD::SHRUNKBLEND) &&
      !DCI.isBeforeLegalize() && !VT.is512BitVector()) {
-    SDValue Shuffle = transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget);
-    if (Shuffle.getNode())
+    if (SDValue Shuffle =
+            transformVSELECTtoBlendVECTOR_SHUFFLE(N, DAG, Subtarget))
      return Shuffle;
   }
@@ -27001,9 +26996,8 @@ static SDValue PerformSTORECombine(SDNode *N, SelectionDAG &DAG,
     // Check if we can detect an AVG pattern from the truncation. If yes,
     // replace the trunc store by a normal store with the result of X86ISD::AVG
     // instruction.
-    SDValue Avg =
-        detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG, Subtarget, dl);
-    if (Avg.getNode())
+    if (SDValue Avg = detectAVGPattern(St->getValue(), St->getMemoryVT(), DAG,
+                                       Subtarget, dl))
      return DAG.getStore(St->getChain(), dl, Avg, St->getBasePtr(),
                          St->getPointerInfo(), St->isVolatile(),
                          St->isNonTemporal(), St->getAlignment());
@@ -27516,9 +27510,8 @@ static SDValue combineVectorTruncation(SDNode *N, SelectionDAG &DAG,
 static SDValue PerformTRUNCATECombine(SDNode *N, SelectionDAG &DAG,
                                       const X86Subtarget &Subtarget) {
   // Try to detect AVG pattern first.
-  SDValue Avg = detectAVGPattern(N->getOperand(0), N->getValueType(0), DAG,
-                                 Subtarget, SDLoc(N));
-  if (Avg.getNode())
+  if (SDValue Avg = detectAVGPattern(N->getOperand(0), N->getValueType(0), DAG,
+                                     Subtarget, SDLoc(N)))
    return Avg;
   return combineVectorTruncation(N, DAG, Subtarget);


@@ -725,11 +725,9 @@ ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
          (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");
-  if (N->getOpcode() == ISD::ADD) {
-    SDValue Result = TryExpandADDWithMul(N, DAG);
-    if (Result.getNode())
+  if (N->getOpcode() == ISD::ADD)
+    if (SDValue Result = TryExpandADDWithMul(N, DAG))
      return Result;
-  }
   SDLoc dl(N);