Rename X86 subregister indices to something shorter.

Use the tablegen-produced enums.

llvm-svn: 104493

commit 9340ea59e1
parent 1c69646e99
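
Note (not part of the commit): the change is mechanical. The handwritten X86::SubregIndex enum removed from the register-info header near the end of this diff is replaced everywhere by the enumerators that TableGen produces from the SubRegIndex definitions in X86RegisterInfo.td. A rough C++ sketch of the kind of enum TableGen emits is shown below; the exact generated file and its layout are an assumption, only the names and values follow from the NumberHack assignments in the .td hunk.

// Hypothetical sketch of the tablegen-produced subregister-index enum.
// Names and values mirror the SubRegIndex defs in X86RegisterInfo.td;
// the surrounding structure is illustrative, not the actual generated file.
namespace X86 {
  enum {
    sub_8bit    = 1,
    sub_8bit_hi = 2,
    sub_16bit   = 3,
    sub_32bit   = 4,
    sub_ss      = 1,   // scalar-single subregister of an XMM register
    sub_sd      = 2,   // scalar-double subregister of an XMM register
    sub_xmm     = 3
  };
}
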
@@ -1029,7 +1029,7 @@ bool X86FastISel::X86SelectShift(const Instruction *I) {
   // we're doing here.
   if (CReg != X86::CL)
     BuildMI(MBB, DL, TII.get(TargetOpcode::EXTRACT_SUBREG), X86::CL)
-      .addReg(CReg).addImm(X86::SUBREG_8BIT);
+      .addReg(CReg).addImm(X86::sub_8bit);

   unsigned ResultReg = createResultReg(RC);
   BuildMI(MBB, DL, TII.get(OpReg), ResultReg).addReg(Op0Reg);
@@ -1137,7 +1137,7 @@ bool X86FastISel::X86SelectTrunc(const Instruction *I) {
   // Then issue an extract_subreg.
   unsigned ResultReg = FastEmitInst_extractsubreg(MVT::i8,
                                                   CopyReg, /*Kill=*/true,
-                                                  X86::SUBREG_8BIT);
+                                                  X86::sub_8bit);
   if (!ResultReg)
     return false;

@@ -1693,7 +1693,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
                          Result,
                          CurDAG->getTargetConstant(8, MVT::i8)), 0);
       // Then truncate it down to i8.
-      Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
+      Result = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                               MVT::i8, Result);
     } else {
       Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -1834,7 +1834,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
                          CurDAG->getTargetConstant(8, MVT::i8)),
                        0);
       // Then truncate it down to i8.
-      Result = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
+      Result = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                               MVT::i8, Result);
     } else {
       Result = CurDAG->getCopyFromReg(CurDAG->getEntryNode(), dl,
@@ -1883,7 +1883,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
       }

       // Extract the l-register.
-      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT, dl,
+      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit, dl,
                                                       MVT::i8, Reg);

       // Emit a testb.
@@ -1912,7 +1912,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
                                      Reg.getValueType(), Reg, RC), 0);

       // Extract the h-register.
-      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_8BIT_HI, dl,
+      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_8bit_hi, dl,
                                                       MVT::i8, Reg);

       // Emit a testb. No special NOREX tricks are needed since there's
@@ -1930,7 +1930,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
       SDValue Reg = N0.getNode()->getOperand(0);

       // Extract the 16-bit subregister.
-      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_16BIT, dl,
+      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_16bit, dl,
                                                       MVT::i16, Reg);

       // Emit a testw.
@@ -1946,7 +1946,7 @@ SDNode *X86DAGToDAGISel::Select(SDNode *Node) {
       SDValue Reg = N0.getNode()->getOperand(0);

       // Extract the 32-bit subregister.
-      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::SUBREG_32BIT, dl,
+      SDValue Subreg = CurDAG->getTargetExtractSubreg(X86::sub_32bit, dl,
                                                       MVT::i32, Reg);

       // Emit a testl.
@@ -498,7 +498,7 @@ def def32 : PatLeaf<(i32 GR32:$src), [{
 // In the case of a 32-bit def that is known to implicitly zero-extend,
 // we can use a SUBREG_TO_REG.
 def : Pat<(i64 (zext def32:$src)),
-          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
+          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

 let neverHasSideEffects = 1 in {
   let Defs = [RAX], Uses = [EAX] in
@@ -2004,14 +2004,14 @@ def : Pat<(extloadi64i16 addr:$src), (MOVZX64rm16 addr:$src)>;
 // defined after an extload.
 def : Pat<(extloadi64i32 addr:$src),
           (SUBREG_TO_REG (i64 0), (MOV32rm addr:$src),
-                         x86_subreg_32bit)>;
+                         sub_32bit)>;

 // anyext. Define these to do an explicit zero-extend to
 // avoid partial-register updates.
 def : Pat<(i64 (anyext GR8 :$src)), (MOVZX64rr8 GR8 :$src)>;
 def : Pat<(i64 (anyext GR16:$src)), (MOVZX64rr16 GR16 :$src)>;
 def : Pat<(i64 (anyext GR32:$src)),
-          (SUBREG_TO_REG (i64 0), GR32:$src, x86_subreg_32bit)>;
+          (SUBREG_TO_REG (i64 0), GR32:$src, sub_32bit)>;

 //===----------------------------------------------------------------------===//
 // Some peepholes
@@ -2038,54 +2038,54 @@ def : Pat<(and GR64:$src, i64immZExt32:$imm),
           (SUBREG_TO_REG
             (i64 0),
             (AND32ri
-              (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit),
+              (EXTRACT_SUBREG GR64:$src, sub_32bit),
               (i32 (GetLo32XForm imm:$imm))),
-            x86_subreg_32bit)>;
+            sub_32bit)>;

 // r & (2^32-1) ==> movz
 def : Pat<(and GR64:$src, 0x00000000FFFFFFFF),
-          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+          (MOVZX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
 // r & (2^16-1) ==> movz
 def : Pat<(and GR64:$src, 0xffff),
-          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)))>;
+          (MOVZX64rr16 (i16 (EXTRACT_SUBREG GR64:$src, sub_16bit)))>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR64:$src, 0xff),
-          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)))>;
+          (MOVZX64rr8 (i8 (EXTRACT_SUBREG GR64:$src, sub_8bit)))>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR32:$src1, 0xff),
-          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, x86_subreg_8bit))>,
+          (MOVZX32rr8 (EXTRACT_SUBREG GR32:$src1, sub_8bit))>,
       Requires<[In64BitMode]>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR16:$src1, 0xff),
-          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, x86_subreg_8bit)))>,
+          (MOVZX16rr8 (i8 (EXTRACT_SUBREG GR16:$src1, sub_8bit)))>,
       Requires<[In64BitMode]>;

 // sext_inreg patterns
 def : Pat<(sext_inreg GR64:$src, i32),
-          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit))>;
+          (MOVSX64rr32 (EXTRACT_SUBREG GR64:$src, sub_32bit))>;
 def : Pat<(sext_inreg GR64:$src, i16),
-          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit))>;
+          (MOVSX64rr16 (EXTRACT_SUBREG GR64:$src, sub_16bit))>;
 def : Pat<(sext_inreg GR64:$src, i8),
-          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit))>;
+          (MOVSX64rr8 (EXTRACT_SUBREG GR64:$src, sub_8bit))>;
 def : Pat<(sext_inreg GR32:$src, i8),
-          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit))>,
+          (MOVSX32rr8 (EXTRACT_SUBREG GR32:$src, sub_8bit))>,
       Requires<[In64BitMode]>;
 def : Pat<(sext_inreg GR16:$src, i8),
-          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)))>,
+          (MOVSX16rr8 (i8 (EXTRACT_SUBREG GR16:$src, sub_8bit)))>,
       Requires<[In64BitMode]>;

 // trunc patterns
 def : Pat<(i32 (trunc GR64:$src)),
-          (EXTRACT_SUBREG GR64:$src, x86_subreg_32bit)>;
+          (EXTRACT_SUBREG GR64:$src, sub_32bit)>;
 def : Pat<(i16 (trunc GR64:$src)),
-          (EXTRACT_SUBREG GR64:$src, x86_subreg_16bit)>;
+          (EXTRACT_SUBREG GR64:$src, sub_16bit)>;
 def : Pat<(i8 (trunc GR64:$src)),
-          (EXTRACT_SUBREG GR64:$src, x86_subreg_8bit)>;
+          (EXTRACT_SUBREG GR64:$src, sub_8bit)>;
 def : Pat<(i8 (trunc GR32:$src)),
-          (EXTRACT_SUBREG GR32:$src, x86_subreg_8bit)>,
+          (EXTRACT_SUBREG GR32:$src, sub_8bit)>,
       Requires<[In64BitMode]>;
 def : Pat<(i8 (trunc GR16:$src)),
-          (EXTRACT_SUBREG GR16:$src, x86_subreg_8bit)>,
+          (EXTRACT_SUBREG GR16:$src, sub_8bit)>,
       Requires<[In64BitMode]>;

 // h-register tricks.
@@ -2101,67 +2101,67 @@ def : Pat<(and (srl_su GR64:$src, (i8 8)), (i64 255)),
             (i64 0),
             (MOVZX32_NOREXrr8
               (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
-                              x86_subreg_8bit_hi)),
-            x86_subreg_32bit)>;
+                              sub_8bit_hi)),
+            sub_32bit)>;
 def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
           (MOVZX32_NOREXrr8
             (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
-                            x86_subreg_8bit_hi))>,
+                            sub_8bit_hi))>,
       Requires<[In64BitMode]>;
 def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
           (MOVZX32_NOREXrr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                                    GR32_ABCD)),
-                                             x86_subreg_8bit_hi))>,
+                                             sub_8bit_hi))>,
       Requires<[In64BitMode]>;
 def : Pat<(srl GR16:$src, (i8 8)),
           (EXTRACT_SUBREG
             (MOVZX32_NOREXrr8
               (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                              x86_subreg_8bit_hi)),
-            x86_subreg_16bit)>,
+                              sub_8bit_hi)),
+            sub_16bit)>,
       Requires<[In64BitMode]>;
 def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
           (MOVZX32_NOREXrr8
             (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                            x86_subreg_8bit_hi))>,
+                            sub_8bit_hi))>,
       Requires<[In64BitMode]>;
 def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
           (MOVZX32_NOREXrr8
             (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                            x86_subreg_8bit_hi))>,
+                            sub_8bit_hi))>,
       Requires<[In64BitMode]>;
 def : Pat<(i64 (zext (srl_su GR16:$src, (i8 8)))),
           (SUBREG_TO_REG
             (i64 0),
             (MOVZX32_NOREXrr8
               (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                              x86_subreg_8bit_hi)),
-            x86_subreg_32bit)>;
+                              sub_8bit_hi)),
+            sub_32bit)>;
 def : Pat<(i64 (anyext (srl_su GR16:$src, (i8 8)))),
           (SUBREG_TO_REG
             (i64 0),
             (MOVZX32_NOREXrr8
               (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                              x86_subreg_8bit_hi)),
-            x86_subreg_32bit)>;
+                              sub_8bit_hi)),
+            sub_32bit)>;

 // h-register extract and store.
 def : Pat<(store (i8 (trunc_su (srl_su GR64:$src, (i8 8)))), addr:$dst),
           (MOV8mr_NOREX
             addr:$dst,
             (EXTRACT_SUBREG (i64 (COPY_TO_REGCLASS GR64:$src, GR64_ABCD)),
-                            x86_subreg_8bit_hi))>;
+                            sub_8bit_hi))>;
 def : Pat<(store (i8 (trunc_su (srl_su GR32:$src, (i8 8)))), addr:$dst),
           (MOV8mr_NOREX
             addr:$dst,
             (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
-                            x86_subreg_8bit_hi))>,
+                            sub_8bit_hi))>,
       Requires<[In64BitMode]>;
 def : Pat<(store (i8 (trunc_su (srl_su GR16:$src, (i8 8)))), addr:$dst),
           (MOV8mr_NOREX
             addr:$dst,
             (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                            x86_subreg_8bit_hi))>,
+                            sub_8bit_hi))>,
       Requires<[In64BitMode]>;

 // (shl x, 1) ==> (add x, x)
@@ -1154,7 +1154,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg)
       .addReg(leaInReg)
       .addReg(Src, getKillRegState(isKill))
-      .addImm(X86::SUBREG_16BIT);
+      .addImm(X86::sub_16bit);

   MachineInstrBuilder MIB = BuildMI(*MFI, MBBI, MI->getDebugLoc(),
                                     get(Opc), leaOutReg);
|
@ -1198,7 +1198,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
|
|||
BuildMI(*MFI, MIB, MI->getDebugLoc(), get(X86::INSERT_SUBREG),leaInReg2)
|
||||
.addReg(leaInReg2)
|
||||
.addReg(Src2, getKillRegState(isKill2))
|
||||
.addImm(X86::SUBREG_16BIT);
|
||||
.addImm(X86::sub_16bit);
|
||||
addRegReg(MIB, leaInReg, true, leaInReg2, true);
|
||||
}
|
||||
if (LV && isKill2 && InsMI2)
|
||||
|
@@ -1212,7 +1212,7 @@ X86InstrInfo::convertToThreeAddressWithLEA(unsigned MIOpc,
     BuildMI(*MFI, MBBI, MI->getDebugLoc(), get(X86::EXTRACT_SUBREG))
       .addReg(Dest, RegState::Define | getDeadRegState(isDead))
       .addReg(leaOutReg, RegState::Kill)
-      .addImm(X86::SUBREG_16BIT);
+      .addImm(X86::sub_16bit);

   if (LV) {
     // Update live variables
@@ -2483,9 +2483,9 @@ X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
       unsigned DstReg = NewMI->getOperand(0).getReg();
       if (TargetRegisterInfo::isPhysicalRegister(DstReg))
         NewMI->getOperand(0).setReg(RI.getSubReg(DstReg,
-                                                 X86::x86_subreg_32bit));
+                                                 X86::sub_32bit));
       else
-        NewMI->getOperand(0).setSubReg(X86::x86_subreg_32bit);
+        NewMI->getOperand(0).setSubReg(X86::sub_32bit);
     }
     return NewMI;
   }
@@ -4503,7 +4503,7 @@ def : Pat<(i32 (anyext GR8 :$src)), (MOVZX32rr8 GR8 :$src)>;

 // Except for i16 -> i32 since isel expect i16 ops to be promoted to i32.
 def : Pat<(i32 (anyext GR16:$src)),
-          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, x86_subreg_16bit)>;
+          (INSERT_SUBREG (i32 (IMPLICIT_DEF)), GR16:$src, sub_16bit)>;


 //===----------------------------------------------------------------------===//
@@ -4523,81 +4523,81 @@ def : Pat<(store (add (loadi32 addr:$dst), 128), addr:$dst),

 // r & (2^16-1) ==> movz
 def : Pat<(and GR32:$src1, 0xffff),
-          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, x86_subreg_16bit))>;
+          (MOVZX32rr16 (EXTRACT_SUBREG GR32:$src1, sub_16bit))>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR32:$src1, 0xff),
           (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src1,
                                                              GR32_ABCD)),
-                                      x86_subreg_8bit))>,
+                                      sub_8bit))>,
       Requires<[In32BitMode]>;
 // r & (2^8-1) ==> movz
 def : Pat<(and GR16:$src1, 0xff),
           (MOVZX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src1,
                                                              GR16_ABCD)),
-                                      x86_subreg_8bit))>,
+                                      sub_8bit))>,
       Requires<[In32BitMode]>;

 // sext_inreg patterns
 def : Pat<(sext_inreg GR32:$src, i16),
-          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit))>;
+          (MOVSX32rr16 (EXTRACT_SUBREG GR32:$src, sub_16bit))>;
 def : Pat<(sext_inreg GR32:$src, i8),
           (MOVSX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                              GR32_ABCD)),
-                                      x86_subreg_8bit))>,
+                                      sub_8bit))>,
       Requires<[In32BitMode]>;
 def : Pat<(sext_inreg GR16:$src, i8),
           (MOVSX16rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                              GR16_ABCD)),
-                                      x86_subreg_8bit))>,
+                                      sub_8bit))>,
       Requires<[In32BitMode]>;

 // trunc patterns
 def : Pat<(i16 (trunc GR32:$src)),
-          (EXTRACT_SUBREG GR32:$src, x86_subreg_16bit)>;
+          (EXTRACT_SUBREG GR32:$src, sub_16bit)>;
 def : Pat<(i8 (trunc GR32:$src)),
           (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
-                          x86_subreg_8bit)>,
+                          sub_8bit)>,
       Requires<[In32BitMode]>;
 def : Pat<(i8 (trunc GR16:$src)),
           (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                          x86_subreg_8bit)>,
+                          sub_8bit)>,
       Requires<[In32BitMode]>;

 // h-register tricks
 def : Pat<(i8 (trunc (srl_su GR16:$src, (i8 8)))),
           (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                          x86_subreg_8bit_hi)>,
+                          sub_8bit_hi)>,
       Requires<[In32BitMode]>;
 def : Pat<(i8 (trunc (srl_su GR32:$src, (i8 8)))),
           (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src, GR32_ABCD)),
-                          x86_subreg_8bit_hi)>,
+                          sub_8bit_hi)>,
       Requires<[In32BitMode]>;
 def : Pat<(srl GR16:$src, (i8 8)),
           (EXTRACT_SUBREG
             (MOVZX32rr8
               (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src, GR16_ABCD)),
-                              x86_subreg_8bit_hi)),
-            x86_subreg_16bit)>,
+                              sub_8bit_hi)),
+            sub_16bit)>,
       Requires<[In32BitMode]>;
 def : Pat<(i32 (zext (srl_su GR16:$src, (i8 8)))),
           (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                              GR16_ABCD)),
-                                      x86_subreg_8bit_hi))>,
+                                      sub_8bit_hi))>,
       Requires<[In32BitMode]>;
 def : Pat<(i32 (anyext (srl_su GR16:$src, (i8 8)))),
           (MOVZX32rr8 (EXTRACT_SUBREG (i16 (COPY_TO_REGCLASS GR16:$src,
                                                              GR16_ABCD)),
-                                      x86_subreg_8bit_hi))>,
+                                      sub_8bit_hi))>,
       Requires<[In32BitMode]>;
 def : Pat<(and (srl_su GR32:$src, (i8 8)), (i32 255)),
           (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                              GR32_ABCD)),
-                                      x86_subreg_8bit_hi))>,
+                                      sub_8bit_hi))>,
       Requires<[In32BitMode]>;
 def : Pat<(srl (and_su GR32:$src, 0xff00), (i8 8)),
           (MOVZX32rr8 (EXTRACT_SUBREG (i32 (COPY_TO_REGCLASS GR32:$src,
                                                              GR32_ABCD)),
-                                      x86_subreg_8bit_hi))>,
+                                      sub_8bit_hi))>,
       Requires<[In32BitMode]>;

 // (shl x, 1) ==> (add x, x)
@@ -387,11 +387,11 @@ def MOVSSrr : SSI<0x10, MRMSrcReg,
 let AddedComplexity = 15 in
 def : Pat<(v4f32 (movl VR128:$src1, VR128:$src2)),
           (MOVSSrr (v4f32 VR128:$src1),
-                   (EXTRACT_SUBREG (v4f32 VR128:$src2), x86_subreg_ss))>;
+                   (EXTRACT_SUBREG (v4f32 VR128:$src2), sub_ss))>;

 // Implicitly promote a 32-bit scalar to a vector.
 def : Pat<(v4f32 (scalar_to_vector FR32:$src)),
-          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, x86_subreg_ss)>;
+          (INSERT_SUBREG (v4f32 (IMPLICIT_DEF)), FR32:$src, sub_ss)>;

 // Loading from memory automatically zeroing upper bits.
 let canFoldAsLoad = 1, isReMaterializable = 1 in
@@ -403,11 +403,11 @@ def MOVSSrm : SSI<0x10, MRMSrcMem, (outs FR32:$dst), (ins f32mem:$src),
 // with SUBREG_TO_REG.
 let AddedComplexity = 20 in {
 def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector (loadf32 addr:$src))))),
-          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), x86_subreg_ss)>;
+          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
 def : Pat<(v4f32 (scalar_to_vector (loadf32 addr:$src))),
-          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), x86_subreg_ss)>;
+          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
 def : Pat<(v4f32 (X86vzmovl (loadv4f32 addr:$src))),
-          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), x86_subreg_ss)>;
+          (SUBREG_TO_REG (i32 0), (MOVSSrm addr:$src), sub_ss)>;
 }

 // Store scalar value to memory.
@@ -419,7 +419,7 @@ def MOVSSmr : SSI<0x11, MRMDestMem, (outs), (ins f32mem:$dst, FR32:$src),
 def : Pat<(store (f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
                  addr:$dst),
           (MOVSSmr addr:$dst,
-                   (EXTRACT_SUBREG (v4f32 VR128:$src), x86_subreg_ss))>;
+                   (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

 // Conversion instructions
 def CVTTSS2SIrr : SSI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR32:$src),
@@ -1131,7 +1131,7 @@ def : Pat<(v8i16 immAllZerosV), (V_SET0PI)>;
 def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;

 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
-          (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), x86_subreg_ss))>;
+          (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;

 //===---------------------------------------------------------------------===//
 // SSE2 Instructions
@@ -1153,11 +1153,11 @@ def MOVSDrr : SDI<0x10, MRMSrcReg,
 let AddedComplexity = 15 in
 def : Pat<(v2f64 (movl VR128:$src1, VR128:$src2)),
           (MOVSDrr (v2f64 VR128:$src1),
-                   (EXTRACT_SUBREG (v2f64 VR128:$src2), x86_subreg_sd))>;
+                   (EXTRACT_SUBREG (v2f64 VR128:$src2), sub_sd))>;

 // Implicitly promote a 64-bit scalar to a vector.
 def : Pat<(v2f64 (scalar_to_vector FR64:$src)),
-          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, x86_subreg_sd)>;
+          (INSERT_SUBREG (v2f64 (IMPLICIT_DEF)), FR64:$src, sub_sd)>;

 // Loading from memory automatically zeroing upper bits.
 let canFoldAsLoad = 1, isReMaterializable = 1, AddedComplexity = 20 in
@@ -1169,15 +1169,15 @@ def MOVSDrm : SDI<0x10, MRMSrcMem, (outs FR64:$dst), (ins f64mem:$src),
 // with SUBREG_TO_REG.
 let AddedComplexity = 20 in {
 def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector (loadf64 addr:$src))))),
-          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
+          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
 def : Pat<(v2f64 (scalar_to_vector (loadf64 addr:$src))),
-          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
+          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
 def : Pat<(v2f64 (X86vzmovl (loadv2f64 addr:$src))),
-          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
+          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
 def : Pat<(v2f64 (X86vzmovl (bc_v2f64 (loadv4f32 addr:$src)))),
-          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
+          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
 def : Pat<(v2f64 (X86vzload addr:$src)),
-          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), x86_subreg_sd)>;
+          (SUBREG_TO_REG (i64 0), (MOVSDrm addr:$src), sub_sd)>;
 }

 // Store scalar value to memory.
@@ -1189,7 +1189,7 @@ def MOVSDmr : SDI<0x11, MRMDestMem, (outs), (ins f64mem:$dst, FR64:$src),
 def : Pat<(store (f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
                  addr:$dst),
           (MOVSDmr addr:$dst,
-                   (EXTRACT_SUBREG (v2f64 VR128:$src), x86_subreg_sd))>;
+                   (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

 // Conversion instructions
 def CVTTSD2SIrr : SDI<0x2C, MRMSrcReg, (outs GR32:$dst), (ins FR64:$src),
@@ -2467,7 +2467,7 @@ def MOVPQI2QImr : PDI<0xD6, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
                                     (iPTR 0))), addr:$dst)]>;

 def : Pat<(f64 (vector_extract (v2f64 VR128:$src), (iPTR 0))),
-          (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), x86_subreg_sd))>;
+          (f64 (EXTRACT_SUBREG (v2f64 VR128:$src), sub_sd))>;

 def MOVPDI2DIrr : PDI<0x7E, MRMDestReg, (outs GR32:$dst), (ins VR128:$src),
                       "movd\t{$src, $dst|$dst, $src}",
@@ -3047,10 +3047,10 @@ def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
             (MOVSSrr (v4f32 (V_SET0PS)), FR32:$src)>;
 def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
           (MOVSSrr (v4f32 (V_SET0PS)),
-                   (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), x86_subreg_ss)))>;
+                   (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss)))>;
 def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
           (MOVSSrr (v4i32 (V_SET0PI)),
-                   (EXTRACT_SUBREG (v4i32 VR128:$src), x86_subreg_ss))>;
+                   (EXTRACT_SUBREG (v4i32 VR128:$src), sub_ss))>;
 }

 // Splat v2f64 / v2i64
@@ -3186,17 +3186,17 @@ let AddedComplexity = 15 in {
 // Setting the lowest element in the vector.
 def : Pat<(v4i32 (movl VR128:$src1, VR128:$src2)),
           (MOVSSrr (v4i32 VR128:$src1),
-                   (EXTRACT_SUBREG (v4i32 VR128:$src2), x86_subreg_ss))>;
+                   (EXTRACT_SUBREG (v4i32 VR128:$src2), sub_ss))>;
 def : Pat<(v2i64 (movl VR128:$src1, VR128:$src2)),
           (MOVSDrr (v2i64 VR128:$src1),
-                   (EXTRACT_SUBREG (v2i64 VR128:$src2), x86_subreg_sd))>;
+                   (EXTRACT_SUBREG (v2i64 VR128:$src2), sub_sd))>;

 // vector_shuffle v1, v2 <4, 5, 2, 3> using movsd
 def : Pat<(v4f32 (movlp VR128:$src1, VR128:$src2)),
-          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, x86_subreg_sd))>,
+          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
       Requires<[HasSSE2]>;
 def : Pat<(v4i32 (movlp VR128:$src1, VR128:$src2)),
-          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, x86_subreg_sd))>,
+          (MOVSDrr VR128:$src1, (EXTRACT_SUBREG VR128:$src2, sub_sd))>,
       Requires<[HasSSE2]>;
 }

@@ -30,16 +30,6 @@ namespace N86 {
   };
 }

-namespace X86 {
-  /// SubregIndex - The index of various sized subregister classes. Note that
-  /// these indices must be kept in sync with the class indices in the
-  /// X86RegisterInfo.td file.
-  enum SubregIndex {
-    SUBREG_8BIT = 1, SUBREG_8BIT_HI = 2, SUBREG_16BIT = 3, SUBREG_32BIT = 4,
-    SUBREG_SS = 1, SUBREG_SD = 2, SUBREG_XMM = 3
-  };
-}
-
 /// DWARFFlavour - Flavour of dwarf regnumbers
 ///
 namespace DWARFFlavour {
@@ -19,14 +19,14 @@
 let Namespace = "X86" in {

   // Subregister indices.
-  def x86_subreg_8bit    : SubRegIndex { let NumberHack = 1; }
-  def x86_subreg_8bit_hi : SubRegIndex { let NumberHack = 2; }
-  def x86_subreg_16bit   : SubRegIndex { let NumberHack = 3; }
-  def x86_subreg_32bit   : SubRegIndex { let NumberHack = 4; }
+  def sub_8bit    : SubRegIndex { let NumberHack = 1; }
+  def sub_8bit_hi : SubRegIndex { let NumberHack = 2; }
+  def sub_16bit   : SubRegIndex { let NumberHack = 3; }
+  def sub_32bit   : SubRegIndex { let NumberHack = 4; }

-  def x86_subreg_ss  : SubRegIndex { let NumberHack = 1; }
-  def x86_subreg_sd  : SubRegIndex { let NumberHack = 2; }
-  def x86_subreg_xmm : SubRegIndex { let NumberHack = 3; }
+  def sub_ss  : SubRegIndex { let NumberHack = 1; }
+  def sub_sd  : SubRegIndex { let NumberHack = 2; }
+  def sub_xmm : SubRegIndex { let NumberHack = 3; }


 // In the register alias definitions below, we define which registers alias