[X86] Remove MOVDI2SSrm/MOV64toSDrm/MOVSS2DImr/MOVSDto64mr CodeGenOnly instructions.

The isel patterns for these use a bitcast and load/store, but
DAG combine should have canonicalized those away.
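
To illustrate (hypothetical function, not part of the commit): the shape those rm patterns were written for is an integer load whose only use is a bitcast to float. DAG combine folds the bitcast into the load, so instruction selection sees an ordinary f32 load (movss) and the (bitconvert (loadi32 addr)) patterns removed below never fire.

  define float @load_f32_bits(i32* %p) {
    %i = load i32, i32* %p
    %f = bitcast i32 %i to float   ; DAG combine folds the bitcast into the load,
    ret float %f                   ; leaving a plain f32 load for isel (movss)
  }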

For the purposes of the memory folding table, these opcodes can be
replaced by the MOVSSrm_alt/MOVSDrm_alt and MOVSSmr/MOVSDmr opcodes.
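
The substitution is safe because both store encodings write exactly the low 32 (or 64) bits of the source XMM register. A small sketch (hypothetical function): the two stores below leave identical bytes in memory, which is why MOVSSmr can stand in for the removed MOVSS2DImr in the store-folding entries, and MOVSDmr for MOVSDto64mr.

  define void @same_bytes(float %f, i32* %pi, float* %pf) {
    %i = bitcast float %f to i32
    store i32 %i, i32* %pi       ; bit-pattern store (old movd/MOVSS2DImr form)
    store float %f, float* %pf   ; value store (movss/MOVSSmr form)
    ret void
  }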

llvm-svn: 363644
Author: Craig Topper
Date:   2019-06-18 03:23:15 +00:00
Commit: 587427716c (parent 8582ecd8d9)
4 changed files with 12 additions and 76 deletions


@@ -3771,19 +3771,10 @@ def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64X:$dst), (ins GR64:$src)
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64X:$dst, (bitconvert GR64:$src))]>,
EVEX, VEX_W, Sched<[WriteVecMoveFromGpr]>;
def VMOV64toSDZrm : AVX512XSI<0x7E, MRMSrcMem, (outs FR64X:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64X:$dst, (bitconvert (loadi64 addr:$src)))]>,
EVEX, VEX_W, EVEX_CD8<8, CD8VT8>, Sched<[WriteVecLoad]>;
def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64X:$src))]>,
EVEX, VEX_W, Sched<[WriteVecMoveFromGpr]>;
def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64X:$src)), addr:$dst)]>,
EVEX, VEX_W, Sched<[WriteVecStore]>,
EVEX_CD8<64, CD8VT1>;
}
} // ExeDomain = SSEPackedInt
@@ -3794,11 +3785,6 @@ def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src)
"vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert GR32:$src))]>,
EVEX, Sched<[WriteVecMoveFromGpr]>;
def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))]>,
EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteVecLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
// Move doubleword from xmm register to r/m32
@@ -3864,11 +3850,6 @@ def VMOVSS2DIZrr : AVX512BI<0x7E, MRMDestReg, (outs GR32:$dst),
"vmovd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32X:$src))]>,
EVEX, Sched<[WriteVecMoveToGpr]>;
def VMOVSS2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
(ins i32mem:$dst, FR32X:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32X:$src)), addr:$dst)]>,
EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
// Move Quadword Int to Packed Quadword Int


@@ -307,8 +307,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
{ X86::MOVDQUrr, X86::MOVDQUmr, TB_FOLDED_STORE },
{ X86::MOVPDI2DIrr, X86::MOVPDI2DImr, TB_FOLDED_STORE },
{ X86::MOVPQIto64rr, X86::MOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::MOVSDto64rr, X86::MOVSDto64mr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::MOVSS2DIrr, X86::MOVSS2DImr, TB_FOLDED_STORE },
{ X86::MOVSDto64rr, X86::MOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::MOVSS2DIrr, X86::MOVSSmr, TB_FOLDED_STORE },
{ X86::MOVUPDrr, X86::MOVUPDmr, TB_FOLDED_STORE },
{ X86::MOVUPSrr, X86::MOVUPSmr, TB_FOLDED_STORE },
{ X86::MUL16r, X86::MUL16m, TB_FOLDED_LOAD },
@@ -391,10 +391,10 @@ static const X86MemoryFoldTableEntry MemoryFoldTable0[] = {
{ X86::VMOVPDI2DIrr, X86::VMOVPDI2DImr, TB_FOLDED_STORE },
{ X86::VMOVPQIto64Zrr, X86::VMOVPQI2QIZmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VMOVPQIto64rr, X86::VMOVPQI2QImr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VMOVSDto64Zrr, X86::VMOVSDto64Zmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VMOVSDto64rr, X86::VMOVSDto64mr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VMOVSS2DIZrr, X86::VMOVSS2DIZmr, TB_FOLDED_STORE },
{ X86::VMOVSS2DIrr, X86::VMOVSS2DImr, TB_FOLDED_STORE },
{ X86::VMOVSDto64Zrr, X86::VMOVSDZmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VMOVSDto64rr, X86::VMOVSDmr, TB_FOLDED_STORE | TB_NO_REVERSE },
{ X86::VMOVSS2DIZrr, X86::VMOVSSZmr, TB_FOLDED_STORE },
{ X86::VMOVSS2DIrr, X86::VMOVSSmr, TB_FOLDED_STORE },
{ X86::VMOVUPDYrr, X86::VMOVUPDYmr, TB_FOLDED_STORE },
{ X86::VMOVUPDZ128rr, X86::VMOVUPDZ128mr, TB_FOLDED_STORE },
{ X86::VMOVUPDZ256rr, X86::VMOVUPDZ256mr, TB_FOLDED_STORE },
@@ -531,13 +531,13 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::MOV32rr, X86::MOV32rm, 0 },
{ X86::MOV64rr, X86::MOV64rm, 0 },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm, TB_NO_REVERSE },
{ X86::MOV64toSDrr, X86::MOV64toSDrm, TB_NO_REVERSE },
{ X86::MOV64toSDrr, X86::MOVSDrm_alt, TB_NO_REVERSE },
{ X86::MOV8rr, X86::MOV8rm, 0 },
{ X86::MOVAPDrr, X86::MOVAPDrm, TB_ALIGN_16 },
{ X86::MOVAPSrr, X86::MOVAPSrm, TB_ALIGN_16 },
{ X86::MOVDDUPrr, X86::MOVDDUPrm, TB_NO_REVERSE },
{ X86::MOVDI2PDIrr, X86::MOVDI2PDIrm, 0 },
{ X86::MOVDI2SSrr, X86::MOVDI2SSrm, 0 },
{ X86::MOVDI2SSrr, X86::MOVSSrm_alt, 0 },
{ X86::MOVDQArr, X86::MOVDQArm, TB_ALIGN_16 },
{ X86::MOVDQUrr, X86::MOVDQUrm, 0 },
{ X86::MOVSHDUPrr, X86::MOVSHDUPrm, TB_ALIGN_16 },
@@ -820,8 +820,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::VGETMANTPSZrri, X86::VGETMANTPSZrmi, 0 },
{ X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, TB_NO_REVERSE },
{ X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, TB_NO_REVERSE },
{ X86::VMOV64toSDZrr, X86::VMOV64toSDZrm, TB_NO_REVERSE },
{ X86::VMOV64toSDrr, X86::VMOV64toSDrm, TB_NO_REVERSE },
{ X86::VMOV64toSDZrr, X86::VMOVSDZrm_alt, TB_NO_REVERSE },
{ X86::VMOV64toSDrr, X86::VMOVSDrm_alt, TB_NO_REVERSE },
{ X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
{ X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
{ X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },
@@ -839,8 +839,8 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::VMOVDDUPrr, X86::VMOVDDUPrm, TB_NO_REVERSE },
{ X86::VMOVDI2PDIZrr, X86::VMOVDI2PDIZrm, 0 },
{ X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 },
{ X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
{ X86::VMOVDI2SSrr, X86::VMOVDI2SSrm, 0 },
{ X86::VMOVDI2SSZrr, X86::VMOVSSZrm_alt, 0 },
{ X86::VMOVDI2SSrr, X86::VMOVSSrm_alt, 0 },
{ X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
{ X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
{ X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },


@@ -4054,19 +4054,11 @@ let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
[(set FR32:$dst, (bitconvert GR32:$src))]>,
VEX, Sched<[WriteVecMoveFromGpr]>;
def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
VEX, Sched<[WriteVecLoad]>;
def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert GR32:$src))]>,
Sched<[WriteVecMoveFromGpr]>;
def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
Sched<[WriteVecLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
//===---------------------------------------------------------------------===//
@@ -4128,32 +4120,15 @@ def MOVPQIto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
// Bitcast FR64 <-> GR64
//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
let Predicates = [UseAVX] in
def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
VEX, Sched<[WriteVecLoad]>;
def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))]>,
VEX, Sched<[WriteVecMoveToGpr]>;
def VMOVSDto64mr : VRS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
VEX, Sched<[WriteVecStore]>;
def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
Sched<[WriteVecLoad]>;
def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))]>,
Sched<[WriteVecMoveToGpr]>;
def MOVSDto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
//===---------------------------------------------------------------------===//
@@ -4164,18 +4139,10 @@ let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32:$src))]>,
VEX, Sched<[WriteVecMoveToGpr]>;
def VMOVSS2DImr : VS2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32:$src)), addr:$dst)]>,
VEX, Sched<[WriteVecStore]>;
def MOVSS2DIrr : S2I<0x7E, MRMDestReg, (outs GR32:$dst), (ins FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bitconvert FR32:$src))]>,
Sched<[WriteVecMoveToGpr]>;
def MOVSS2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, FR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(store (i32 (bitconvert FR32:$src)), addr:$dst)]>,
Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1
let Predicates = [UseAVX] in {


@@ -2258,8 +2258,6 @@ body: |
$edi = VCVTTSS2SIZrr_Int $xmm0
; CHECK: $xmm0 = VMOV64toSDrr $rdi
$xmm0 = VMOV64toSDZrr $rdi
; CHECK: $xmm0 = VMOVDI2SSrm $rip, 1, $noreg, 0, $noreg
$xmm0 = VMOVDI2SSZrm $rip, 1, $noreg, 0, $noreg
; CHECK: $xmm0 = VMOVDI2SSrr $eax
$xmm0 = VMOVDI2SSZrr $eax
; CHECK: VMOVSDmr $rdi, 1, $noreg, 0, $noreg, $xmm0
@@ -2274,8 +2272,6 @@ body: |
$xmm0 = VMOVSDZrr_REV $xmm0, $xmm1
; CHECK: $rax = VMOVSDto64rr $xmm0
$rax = VMOVSDto64Zrr $xmm0
; CHECK: VMOVSDto64mr $rdi, 1, $noreg, 0, $noreg, $xmm0
VMOVSDto64Zmr $rdi, 1, $noreg, 0, $noreg, $xmm0
; CHECK: VMOVSSmr $rdi, 1, $noreg, 0, $noreg, $xmm0
VMOVSSZmr $rdi, 1, $noreg, 0, $noreg, $xmm0
; CHECK: $xmm0 = VMOVSSrm $rip, 1, $noreg, 0, $noreg
@@ -2286,8 +2282,6 @@ body: |
$xmm0 = VMOVSSZrr $xmm0, $xmm1
; CHECK: $xmm0 = VMOVSSrr_REV $xmm0, $xmm1
$xmm0 = VMOVSSZrr_REV $xmm0, $xmm1
; CHECK: VMOVSS2DImr $rdi, 1, $noreg, 0, $noreg, $xmm0
VMOVSS2DIZmr $rdi, 1, $noreg, 0, $noreg, $xmm0
; CHECK: $eax = VMOVSS2DIrr $xmm0
$eax = VMOVSS2DIZrr $xmm0
; CHECK: $xmm0 = VMOV64toPQIrr $rdi
@@ -4646,8 +4640,6 @@ body: |
$edi = VCVTTSS2SIZrr_Int $xmm16
; CHECK: $xmm16 = VMOV64toSDZrr $rdi
$xmm16 = VMOV64toSDZrr $rdi
; CHECK: $xmm16 = VMOVDI2SSZrm $rip, 1, $rax, 0, $noreg
$xmm16 = VMOVDI2SSZrm $rip, 1, $rax, 0, $noreg
; CHECK: $xmm16 = VMOVDI2SSZrr $eax
$xmm16 = VMOVDI2SSZrr $eax
; CHECK: VMOVSDZmr $rdi, 1, $noreg, 0, $noreg, $xmm16
@@ -4662,8 +4654,6 @@ body: |
$xmm16 = VMOVSDZrr_REV $xmm16, $xmm1
; CHECK: $rax = VMOVSDto64Zrr $xmm16
$rax = VMOVSDto64Zrr $xmm16
; CHECK: VMOVSDto64Zmr $rdi, 1, $noreg, 0, $noreg, $xmm16
VMOVSDto64Zmr $rdi, 1, $noreg, 0, $noreg, $xmm16
; CHECK: VMOVSSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16
VMOVSSZmr $rdi, 1, $noreg, 0, $noreg, $xmm16
; CHECK: $xmm16 = VMOVSSZrm $rip, 1, $rax, 0, $noreg
@@ -4674,8 +4664,6 @@ body: |
$xmm16 = VMOVSSZrr $xmm16, $xmm1
; CHECK: $xmm16 = VMOVSSZrr_REV $xmm16, $xmm1
$xmm16 = VMOVSSZrr_REV $xmm16, $xmm1
; CHECK: VMOVSS2DIZmr $rdi, 1, $noreg, 0, $noreg, $xmm16
VMOVSS2DIZmr $rdi, 1, $noreg, 0, $noreg, $xmm16
; CHECK: $eax = VMOVSS2DIZrr $xmm16
$eax = VMOVSS2DIZrr $xmm16
; CHECK: $xmm16 = VMOV64toPQIZrr $rdi