[X86] Use TargetConstant for condition code on X86ISD::SETCC/CMOV/BRCOND nodes.

This removes the need for ConvertToTarget opcodes in the isel table.
It's also consistent with the recent changes to use TargetConstant
for intrinsic nodes that always take immediates.
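For illustration, the whole change boils down to the substitution visible in the getSETCC helper (a sketch of the code in the first hunk below; every other site that builds a condition-code operand gets the same treatment):

  // Sketch: the condition code is now emitted as a TargetConstant, so the
  // isel table no longer needs a ConvertToTarget step, and the TableGen
  // patterns match it with timm:$cond instead of imm:$cond.
  static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
                          SelectionDAG &DAG) {
    return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
                       DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
  }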

Differential Revision: https://reviews.llvm.org/D67902

llvm-svn: 372645
Author: Craig Topper
Date:   2019-09-23 19:48:20 +00:00
commit e3c2163ffe (parent a7d186c796)
4 changed files with 134 additions and 139 deletions


@@ -19853,7 +19853,7 @@ static SDValue LowerFGETSIGN(SDValue Op, SelectionDAG &DAG) {
static SDValue getSETCC(X86::CondCode Cond, SDValue EFLAGS, const SDLoc &dl,
SelectionDAG &DAG) {
return DAG.getNode(X86ISD::SETCC, dl, MVT::i8,
DAG.getConstant(Cond, dl, MVT::i8), EFLAGS);
DAG.getTargetConstant(Cond, dl, MVT::i8), EFLAGS);
}
/// Helper for matching OR(EXTRACTELT(X,0),OR(EXTRACTELT(X,1),...))
@@ -19956,8 +19956,8 @@ static SDValue LowerVectorAllZeroTest(SDValue Op, ISD::CondCode CC,
VecIns.push_back(DAG.getNode(ISD::OR, DL, TestVT, LHS, RHS));
}
X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE, DL,
MVT::i8);
X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_E : X86::COND_NE,
DL, MVT::i8);
return DAG.getNode(X86ISD::PTEST, DL, MVT::i32, VecIns.back(), VecIns.back());
}
@@ -20394,8 +20394,8 @@ static SDValue LowerAndToBT(SDValue And, ISD::CondCode CC,
if (Src.getValueType() != BitNo.getValueType())
BitNo = DAG.getNode(ISD::ANY_EXTEND, dl, Src.getValueType(), BitNo);
X86CC = DAG.getConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
dl, MVT::i8);
X86CC = DAG.getTargetConstant(CC == ISD::SETEQ ? X86::COND_AE : X86::COND_B,
dl, MVT::i8);
return DAG.getNode(X86ISD::BT, dl, MVT::i32, Src, BitNo);
}
@@ -20943,7 +20943,7 @@ static SDValue EmitKORTEST(SDValue Op0, SDValue Op1, ISD::CondCode CC,
RHS = Op0.getOperand(1);
}
X86CC = DAG.getConstant(X86Cond, dl, MVT::i8);
X86CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
return DAG.getNode(X86ISD::KORTEST, dl, MVT::i32, LHS, RHS);
}
@@ -20988,7 +20988,7 @@ SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
if (Invert) {
X86::CondCode CCode = (X86::CondCode)Op0.getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
X86CC = DAG.getConstant(CCode, dl, MVT::i8);
X86CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
}
return Op0.getOperand(1);
@@ -21002,7 +21002,7 @@ SDValue X86TargetLowering::emitFlagsForSetcc(SDValue Op0, SDValue Op1,
SDValue EFLAGS = EmitCmp(Op0, Op1, CondCode, dl, DAG);
EFLAGS = ConvertCmpIfNecessary(EFLAGS, DAG);
X86CC = DAG.getConstant(CondCode, dl, MVT::i8);
X86CC = DAG.getTargetConstant(CondCode, dl, MVT::i8);
return EFLAGS;
}
@@ -21383,7 +21383,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode X86Cond;
std::tie(Value, Cond) = getX86XALUOOp(X86Cond, Cond.getValue(0), DAG);
CC = DAG.getConstant(X86Cond, DL, MVT::i8);
CC = DAG.getTargetConstant(X86Cond, DL, MVT::i8);
AddTest = false;
}
@@ -21405,7 +21405,7 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
}
if (AddTest) {
CC = DAG.getConstant(X86::COND_NE, DL, MVT::i8);
CC = DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8);
Cond = EmitCmp(Cond, DAG.getConstant(0, DL, Cond.getValueType()),
X86::COND_NE, DL, DAG);
}
@@ -21421,9 +21421,9 @@ SDValue X86TargetLowering::LowerSELECT(SDValue Op, SelectionDAG &DAG) const {
if ((CondCode == X86::COND_AE || CondCode == X86::COND_B) &&
(isAllOnesConstant(Op1) || isAllOnesConstant(Op2)) &&
(isNullConstant(Op1) || isNullConstant(Op2))) {
SDValue Res = DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
DAG.getConstant(X86::COND_B, DL, MVT::i8),
Cond);
SDValue Res =
DAG.getNode(X86ISD::SETCC_CARRY, DL, Op.getValueType(),
DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cond);
if (isAllOnesConstant(Op1) != (CondCode == X86::COND_B))
return DAG.getNOT(DL, Res, Res.getValueType());
return Res;
@@ -22167,7 +22167,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
if (Inverted)
X86Cond = X86::GetOppositeBranchCondition(X86Cond);
CC = DAG.getConstant(X86Cond, dl, MVT::i8);
CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
addTest = false;
} else {
unsigned CondOpc;
@@ -22198,7 +22198,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode0 =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode0 = X86::GetOppositeBranchCondition(CCode0);
CC = DAG.getConstant(CCode0, dl, MVT::i8);
CC = DAG.getTargetConstant(CCode0, dl, MVT::i8);
SDNode *User = *Op.getNode()->use_begin();
// Look for an unconditional branch following this conditional branch.
// We need this because we need to reverse the successors in order
@@ -22216,7 +22216,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode1 =
(X86::CondCode)Cond.getOperand(1).getConstantOperandVal(0);
CCode1 = X86::GetOppositeBranchCondition(CCode1);
CC = DAG.getConstant(CCode1, dl, MVT::i8);
CC = DAG.getTargetConstant(CCode1, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
@@ -22229,7 +22229,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
X86::CondCode CCode =
(X86::CondCode)Cond.getOperand(0).getConstantOperandVal(0);
CCode = X86::GetOppositeBranchCondition(CCode);
CC = DAG.getConstant(CCode, dl, MVT::i8);
CC = DAG.getTargetConstant(CCode, dl, MVT::i8);
Cond = Cond.getOperand(0).getOperand(1);
addTest = false;
} else if (Cond.getOpcode() == ISD::SETCC &&
@@ -22255,10 +22255,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
@@ -22271,10 +22271,10 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
SDValue Cmp = DAG.getNode(X86ISD::CMP, dl, MVT::i32,
Cond.getOperand(0), Cond.getOperand(1));
Cmp = ConvertCmpIfNecessary(Cmp, DAG);
CC = DAG.getConstant(X86::COND_NE, dl, MVT::i8);
CC = DAG.getTargetConstant(X86::COND_NE, dl, MVT::i8);
Chain = DAG.getNode(X86ISD::BRCOND, dl, Op.getValueType(),
Chain, Dest, CC, Cmp);
CC = DAG.getConstant(X86::COND_P, dl, MVT::i8);
CC = DAG.getTargetConstant(X86::COND_P, dl, MVT::i8);
Cond = Cmp;
addTest = false;
}
@@ -22299,7 +22299,7 @@ SDValue X86TargetLowering::LowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
if (addTest) {
X86::CondCode X86Cond = Inverted ? X86::COND_E : X86::COND_NE;
CC = DAG.getConstant(X86Cond, dl, MVT::i8);
CC = DAG.getTargetConstant(X86Cond, dl, MVT::i8);
Cond = EmitCmp(Cond, DAG.getConstant(0, dl, Cond.getValueType()),
X86Cond, dl, DAG);
}
@@ -24171,10 +24171,10 @@ static SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, const X86Subtarget &Subtarget,
// If the value returned by RDRAND/RDSEED was valid (CF=1), return 1.
// Otherwise return the value from Rand, which is always 0, casted to i32.
SDValue Ops[] = { DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
DAG.getConstant(1, dl, Op->getValueType(1)),
DAG.getConstant(X86::COND_B, dl, MVT::i8),
SDValue(Result.getNode(), 1) };
SDValue Ops[] = {DAG.getZExtOrTrunc(Result, dl, Op->getValueType(1)),
DAG.getConstant(1, dl, Op->getValueType(1)),
DAG.getTargetConstant(X86::COND_B, dl, MVT::i8),
SDValue(Result.getNode(), 1)};
SDValue isValid = DAG.getNode(X86ISD::CMOV, dl, Op->getValueType(1), Ops);
// Return { result, isValid, chain }.
@@ -24911,12 +24911,9 @@ static SDValue LowerCTLZ(SDValue Op, const X86Subtarget &Subtarget,
if (Opc == ISD::CTLZ) {
// If src is zero (i.e. bsr sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
SDValue Ops[] = {Op, DAG.getConstant(NumBits + NumBits - 1, dl, OpVT),
DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)};
Op = DAG.getNode(X86ISD::CMOV, dl, OpVT, Ops);
}
@@ -24944,12 +24941,9 @@ static SDValue LowerCTTZ(SDValue Op, const X86Subtarget &Subtarget,
Op = DAG.getNode(X86ISD::BSF, dl, VTs, N0);
// If src is zero (i.e. bsf sets ZF), returns NumBits.
SDValue Ops[] = {
Op,
DAG.getConstant(NumBits, dl, VT),
DAG.getConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)
};
SDValue Ops[] = {Op, DAG.getConstant(NumBits, dl, VT),
DAG.getTargetConstant(X86::COND_E, dl, MVT::i8),
Op.getValue(1)};
return DAG.getNode(X86ISD::CMOV, dl, VT, Ops);
}
@@ -25085,7 +25079,7 @@ static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
SDValue N0 = Op.getOperand(0);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, DAG.getVTList(VT, MVT::i32),
DAG.getConstant(0, DL, VT), N0);
SDValue Ops[] = {N0, Neg, DAG.getConstant(X86::COND_GE, DL, MVT::i8),
SDValue Ops[] = {N0, Neg, DAG.getTargetConstant(X86::COND_GE, DL, MVT::i8),
SDValue(Neg.getNode(), 1)};
return DAG.getNode(X86ISD::CMOV, DL, VT, Ops);
}
@@ -37904,8 +37898,8 @@ static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
// We can't always do this as FCMOV only supports a subset of X86 cond.
if (SDValue Flags = combineSetCCEFLAGS(Cond, CC, DAG, Subtarget)) {
if (FalseOp.getValueType() != MVT::f80 || hasFPCMov(CC)) {
SDValue Ops[] = {FalseOp, TrueOp, DAG.getConstant(CC, DL, MVT::i8),
Flags};
SDValue Ops[] = {FalseOp, TrueOp, DAG.getTargetConstant(CC, DL, MVT::i8),
Flags};
return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
}
}
@@ -38025,8 +38019,8 @@ static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
if (CC == X86::COND_E &&
CmpAgainst == dyn_cast<ConstantSDNode>(TrueOp)) {
SDValue Ops[] = { FalseOp, Cond.getOperand(0),
DAG.getConstant(CC, DL, MVT::i8), Cond };
SDValue Ops[] = {FalseOp, Cond.getOperand(0),
DAG.getTargetConstant(CC, DL, MVT::i8), Cond};
return DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
}
}
@@ -38060,10 +38054,11 @@ static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
CC1 = X86::GetOppositeBranchCondition(CC1);
}
SDValue LOps[] = {FalseOp, TrueOp, DAG.getConstant(CC0, DL, MVT::i8),
Flags};
SDValue LOps[] = {FalseOp, TrueOp,
DAG.getTargetConstant(CC0, DL, MVT::i8), Flags};
SDValue LCMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), LOps);
SDValue Ops[] = {LCMOV, TrueOp, DAG.getConstant(CC1, DL, MVT::i8), Flags};
SDValue Ops[] = {LCMOV, TrueOp, DAG.getTargetConstant(CC1, DL, MVT::i8),
Flags};
SDValue CMOV = DAG.getNode(X86ISD::CMOV, DL, N->getValueType(0), Ops);
return CMOV;
}
@@ -38095,9 +38090,9 @@ static SDValue combineCMov(SDNode *N, SelectionDAG &DAG,
EVT VT = N->getValueType(0);
// This should constant fold.
SDValue Diff = DAG.getNode(ISD::SUB, DL, VT, Const, Add.getOperand(1));
SDValue CMov = DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
DAG.getConstant(X86::COND_NE, DL, MVT::i8),
Cond);
SDValue CMov =
DAG.getNode(X86ISD::CMOV, DL, VT, Diff, Add.getOperand(0),
DAG.getTargetConstant(X86::COND_NE, DL, MVT::i8), Cond);
return DAG.getNode(ISD::ADD, DL, VT, CMov, Add.getOperand(1));
}
}
@@ -43568,7 +43563,7 @@ static SDValue combineBrCond(SDNode *N, SelectionDAG &DAG,
// Make sure to not keep references to operands, as combineSetCCEFLAGS can
// RAUW them under us.
if (SDValue Flags = combineSetCCEFLAGS(EFLAGS, CC, DAG, Subtarget)) {
SDValue Cond = DAG.getConstant(CC, DL, MVT::i8);
SDValue Cond = DAG.getTargetConstant(CC, DL, MVT::i8);
return DAG.getNode(X86ISD::BRCOND, DL, N->getVTList(), N->getOperand(0),
N->getOperand(1), Cond, Flags);
}
@@ -43987,12 +43982,12 @@ static SDValue combineADC(SDNode *N, SelectionDAG &DAG,
SDLoc DL(N);
EVT VT = N->getValueType(0);
SDValue CarryOut = DAG.getConstant(0, DL, N->getValueType(1));
SDValue Res1 = DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL,
MVT::i8),
N->getOperand(2)),
DAG.getConstant(1, DL, VT));
SDValue Res1 =
DAG.getNode(ISD::AND, DL, VT,
DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
N->getOperand(2)),
DAG.getConstant(1, DL, VT));
return DCI.CombineTo(N, Res1, CarryOut);
}
@@ -44052,7 +44047,7 @@ static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
// -1 + SETAE --> -1 + (!CF) --> CF ? -1 : 0 --> SBB %eax, %eax
// 0 - SETB --> 0 - (CF) --> CF ? -1 : 0 --> SBB %eax, %eax
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
Y.getOperand(1));
}
@@ -44070,7 +44065,7 @@ static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
EFLAGS.getOperand(1), EFLAGS.getOperand(0));
SDValue NewEFLAGS = SDValue(NewSub.getNode(), EFLAGS.getResNo());
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
NewEFLAGS);
}
}
@@ -44130,7 +44125,7 @@ static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
SDVTList X86SubVTs = DAG.getVTList(ZVT, MVT::i32);
SDValue Neg = DAG.getNode(X86ISD::SUB, DL, X86SubVTs, Zero, Z);
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8),
DAG.getTargetConstant(X86::COND_B, DL, MVT::i8),
SDValue(Neg.getNode(), 1));
}
@@ -44143,7 +44138,7 @@ static SDValue combineAddOrSubToADCOrSBB(SDNode *N, SelectionDAG &DAG) {
SDValue One = DAG.getConstant(1, DL, ZVT);
SDValue Cmp1 = DAG.getNode(X86ISD::CMP, DL, MVT::i32, Z, One);
return DAG.getNode(X86ISD::SETCC_CARRY, DL, VT,
DAG.getConstant(X86::COND_B, DL, MVT::i8), Cmp1);
DAG.getTargetConstant(X86::COND_B, DL, MVT::i8), Cmp1);
}
}


@@ -20,19 +20,19 @@ let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
: I<0x40, MRMSrcRegCC, (outs GR16:$dst), (ins GR16:$src1, GR16:$src2, ccode:$cond),
"cmov${cond}{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst,
(X86cmov GR16:$src1, GR16:$src2, imm:$cond, EFLAGS))]>,
(X86cmov GR16:$src1, GR16:$src2, timm:$cond, EFLAGS))]>,
TB, OpSize16;
def CMOV32rr
: I<0x40, MRMSrcRegCC, (outs GR32:$dst), (ins GR32:$src1, GR32:$src2, ccode:$cond),
"cmov${cond}{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst,
(X86cmov GR32:$src1, GR32:$src2, imm:$cond, EFLAGS))]>,
(X86cmov GR32:$src1, GR32:$src2, timm:$cond, EFLAGS))]>,
TB, OpSize32;
def CMOV64rr
:RI<0x40, MRMSrcRegCC, (outs GR64:$dst), (ins GR64:$src1, GR64:$src2, ccode:$cond),
"cmov${cond}{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst,
(X86cmov GR64:$src1, GR64:$src2, imm:$cond, EFLAGS))]>, TB;
(X86cmov GR64:$src1, GR64:$src2, timm:$cond, EFLAGS))]>, TB;
}
let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
@@ -41,29 +41,46 @@ let Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst",
: I<0x40, MRMSrcMemCC, (outs GR16:$dst), (ins GR16:$src1, i16mem:$src2, ccode:$cond),
"cmov${cond}{w}\t{$src2, $dst|$dst, $src2}",
[(set GR16:$dst, (X86cmov GR16:$src1, (loadi16 addr:$src2),
imm:$cond, EFLAGS))]>, TB, OpSize16;
timm:$cond, EFLAGS))]>, TB, OpSize16;
def CMOV32rm
: I<0x40, MRMSrcMemCC, (outs GR32:$dst), (ins GR32:$src1, i32mem:$src2, ccode:$cond),
"cmov${cond}{l}\t{$src2, $dst|$dst, $src2}",
[(set GR32:$dst, (X86cmov GR32:$src1, (loadi32 addr:$src2),
imm:$cond, EFLAGS))]>, TB, OpSize32;
timm:$cond, EFLAGS))]>, TB, OpSize32;
def CMOV64rm
:RI<0x40, MRMSrcMemCC, (outs GR64:$dst), (ins GR64:$src1, i64mem:$src2, ccode:$cond),
"cmov${cond}{q}\t{$src2, $dst|$dst, $src2}",
[(set GR64:$dst, (X86cmov GR64:$src1, (loadi64 addr:$src2),
imm:$cond, EFLAGS))]>, TB;
timm:$cond, EFLAGS))]>, TB;
} // Uses = [EFLAGS], Predicates = [HasCMov], Constraints = "$src1 = $dst"
} // isCodeGenOnly = 1, ForceDisassemble = 1
def inv_cond_XFORM : SDNodeXForm<imm, [{
X86::CondCode CC = static_cast<X86::CondCode>(N->getZExtValue());
return CurDAG->getTargetConstant(X86::GetOppositeBranchCondition(CC),
SDLoc(N), MVT::i8);
}]>;
// Conditional moves with folded loads with operands swapped and conditions
// inverted.
let Predicates = [HasCMov] in {
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, timm:$cond, EFLAGS),
(CMOV16rm GR16:$src2, addr:$src1, (inv_cond_XFORM timm:$cond))>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, timm:$cond, EFLAGS),
(CMOV32rm GR32:$src2, addr:$src1, (inv_cond_XFORM timm:$cond))>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, timm:$cond, EFLAGS),
(CMOV64rm GR64:$src2, addr:$src1, (inv_cond_XFORM timm:$cond))>;
}
// SetCC instructions.
let Uses = [EFLAGS], isCodeGenOnly = 1, ForceDisassemble = 1 in {
def SETCCr : I<0x90, MRMXrCC, (outs GR8:$dst), (ins ccode:$cond),
"set${cond}\t$dst",
[(set GR8:$dst, (X86setcc imm:$cond, EFLAGS))]>,
[(set GR8:$dst, (X86setcc timm:$cond, EFLAGS))]>,
TB, Sched<[WriteSETCC]>;
def SETCCm : I<0x90, MRMXmCC, (outs), (ins i8mem:$dst, ccode:$cond),
"set${cond}\t$dst",
[(store (X86setcc imm:$cond, EFLAGS), addr:$dst)]>,
[(store (X86setcc timm:$cond, EFLAGS), addr:$dst)]>,
TB, Sched<[WriteSETCCStore]>;
} // Uses = [EFLAGS]


@@ -542,7 +542,7 @@ multiclass CMOVrr_PSEUDO<RegisterClass RC, ValueType VT> {
def CMOV#NAME : I<0, Pseudo,
(outs RC:$dst), (ins RC:$t, RC:$f, i8imm:$cond),
"#CMOV_"#NAME#" PSEUDO!",
[(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, imm:$cond,
[(set RC:$dst, (VT (X86cmov RC:$t, RC:$f, timm:$cond,
EFLAGS)))]>;
}
@@ -593,66 +593,66 @@ let usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS] in {
defm _VK64 : CMOVrr_PSEUDO<VK64, v64i1>;
} // usesCustomInserter = 1, hasNoSchedulingInfo = 1, Uses = [EFLAGS]
def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(f128 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
let Predicates = [NoVLX] in {
def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, imm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, imm:$cond)>;
def : Pat<(v16i8 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
def : Pat<(v8i16 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
def : Pat<(v4i32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128:$t, VR128:$f, timm:$cond, EFLAGS)),
(CMOV_VR128 VR128:$t, VR128:$f, timm:$cond)>;
def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, imm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, imm:$cond)>;
def : Pat<(v32i8 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
def : Pat<(v16i16 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
def : Pat<(v8i32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256:$t, VR256:$f, timm:$cond, EFLAGS)),
(CMOV_VR256 VR256:$t, VR256:$f, timm:$cond)>;
}
let Predicates = [HasVLX] in {
def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, imm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, imm:$cond)>;
def : Pat<(v16i8 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
def : Pat<(v8i16 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
def : Pat<(v4i32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
def : Pat<(v4f32 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
def : Pat<(v2f64 (X86cmov VR128X:$t, VR128X:$f, timm:$cond, EFLAGS)),
(CMOV_VR128X VR128X:$t, VR128X:$f, timm:$cond)>;
def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, imm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, imm:$cond)>;
def : Pat<(v32i8 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
def : Pat<(v16i16 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
def : Pat<(v8i32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
def : Pat<(v8f32 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
def : Pat<(v4f64 (X86cmov VR256X:$t, VR256X:$f, timm:$cond, EFLAGS)),
(CMOV_VR256X VR256X:$t, VR256X:$f, timm:$cond)>;
}
def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, imm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, imm:$cond)>;
def : Pat<(v64i8 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v32i16 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16i32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v16f32 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
def : Pat<(v8f64 (X86cmov VR512:$t, VR512:$f, timm:$cond, EFLAGS)),
(CMOV_VR512 VR512:$t, VR512:$f, timm:$cond)>;
//===----------------------------------------------------------------------===//
// Normal-Instructions-With-Lock-Prefix Pseudo Instructions
@@ -1276,23 +1276,6 @@ def : Pat<(X86cmp GR32:$src1, 0),
def : Pat<(X86cmp GR64:$src1, 0),
(TEST64rr GR64:$src1, GR64:$src1)>;
def inv_cond_XFORM : SDNodeXForm<imm, [{
X86::CondCode CC = static_cast<X86::CondCode>(N->getZExtValue());
return CurDAG->getTargetConstant(X86::GetOppositeBranchCondition(CC),
SDLoc(N), MVT::i8);
}]>;
// Conditional moves with folded loads with operands swapped and conditions
// inverted.
let Predicates = [HasCMov] in {
def : Pat<(X86cmov (loadi16 addr:$src1), GR16:$src2, imm:$cond, EFLAGS),
(CMOV16rm GR16:$src2, addr:$src1, (inv_cond_XFORM imm:$cond))>;
def : Pat<(X86cmov (loadi32 addr:$src1), GR32:$src2, imm:$cond, EFLAGS),
(CMOV32rm GR32:$src2, addr:$src1, (inv_cond_XFORM imm:$cond))>;
def : Pat<(X86cmov (loadi64 addr:$src1), GR64:$src2, imm:$cond, EFLAGS),
(CMOV64rm GR64:$src2, addr:$src1, (inv_cond_XFORM imm:$cond))>;
}
// zextload bool -> zextload byte
// i1 stored in one byte in zero-extended form.
// Upper bits cleanup should be executed before Store.


@@ -75,7 +75,7 @@ let isBranch = 1, isTerminator = 1, Uses = [EFLAGS], SchedRW = [WriteJump],
def JCC_1 : Ii8PCRel <0x70, AddCCFrm, (outs),
(ins brtarget8:$dst, ccode:$cond),
"j${cond}\t$dst",
[(X86brcond bb:$dst, imm:$cond, EFLAGS)]>;
[(X86brcond bb:$dst, timm:$cond, EFLAGS)]>;
let hasSideEffects = 0 in {
def JCC_2 : Ii16PCRel<0x80, AddCCFrm, (outs),
(ins brtarget16:$dst, ccode:$cond),