[X86] Remove (V)MOV64toSDrr/m and (V)MOVDI2SSrr/m. Use 128-bit result MOVD/MOVQ and COPY_TO_REGCLASS instead
Summary:
The register forms of these instructions are CodeGenOnly instructions that cover GR32->FR32 and GR64->FR64 bitcasts. There is a similar set of instructions for the opposite bitcast. Because their patterns use bitcasts, these instructions get marked as "bitcast" machine instructions as well, and the peephole pass is able to look through them, as well as other copies, to try to avoid register bank copies.

Because FR32/FR64/VR128 are all coalescable to each other, we can end up in a situation where a GR32->FR32->VR128->FR64->GR64 sequence is reduced to a GR32->GR64 copy, which the copyPhysReg code can't handle.

To prevent this, this patch removes one set of the 'bitcast' instructions. Now we can only go GR32->VR128->FR32 or GR64->VR128->FR64. The instruction that converts from GR32/GR64->VR128 has no special significance to the peephole pass and won't be looked through.

The other option would be to add support to copyPhysReg to promote the GR32->GR64 copy to a GR64->GR64 copy; the upper bits were basically undefined anyway. But removing the CodeGenOnly instruction in favor of one that won't be optimized seemed safer.

I deleted the peephole test because it couldn't be made to work with the bitcast instructions removed.

The load versions of the instructions were unnecessary, as the pattern that selects them contains a bitcasted load, which should never happen.

Fixes PR41619.

Reviewers: RKSimon, spatel

Reviewed By: RKSimon

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D61223

llvm-svn: 359392
parent 03c4e2663c
commit bd35a30940
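For context, a minimal LLVM IR sketch (the function names are illustrative, not taken from the patch) of the two bitcast directions the removed CodeGenOnly instructions used to match. With the patterns added in this patch, these now select the 128-bit (V)MOVDI2PDIrr / (V)MOV64toPQIrr (or their AVX-512 Z variants) followed by a COPY_TO_REGCLASS to FR32/FR64, instead of the dedicated (V)MOVDI2SSrr / (V)MOV64toSDrr forms:

; i32 -> f32 bitcast: previously (V)MOVDI2SSrr, now (V)MOVDI2PDIrr + COPY_TO_REGCLASS.
define float @gpr32_to_fp32(i32 %x) {
  %f = bitcast i32 %x to float
  ret float %f
}

; i64 -> f64 bitcast: previously (V)MOV64toSDrr, now (V)MOV64toPQIrr + COPY_TO_REGCLASS.
define double @gpr64_to_fp64(i64 %x) {
  %d = bitcast i64 %x to double
  ret double %d
}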
@@ -3809,14 +3809,6 @@ def VMOV64toPQIZrm : AVX512BI<0x6E, MRMSrcMem, (outs VR128X:$dst),
"vmovq\t{$src, $dst|$dst, $src}", []>,
EVEX, VEX_W, EVEX_CD8<64, CD8VT1>, Sched<[WriteVecLoad]>;
let isCodeGenOnly = 1 in {
def VMOV64toSDZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR64X:$dst), (ins GR64:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64X:$dst, (bitconvert GR64:$src))]>,
EVEX, VEX_W, Sched<[WriteVecMoveFromGpr]>;
def VMOV64toSDZrm : AVX512XSI<0x7E, MRMSrcMem, (outs FR64X:$dst), (ins i64mem:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set FR64X:$dst, (bitconvert (loadi64 addr:$src)))]>,
EVEX, VEX_W, EVEX_CD8<8, CD8VT8>, Sched<[WriteVecLoad]>;
def VMOVSDto64Zrr : AVX512BI<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64X:$src),
"vmovq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64X:$src))]>,

@@ -3829,20 +3821,6 @@ def VMOVSDto64Zmr : AVX512BI<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, FR64X:$
}
} // ExeDomain = SSEPackedInt

// Move Int Doubleword to Single Scalar
//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVDI2SSZrr : AVX512BI<0x6E, MRMSrcReg, (outs FR32X:$dst), (ins GR32:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert GR32:$src))]>,
EVEX, Sched<[WriteVecMoveFromGpr]>;

def VMOVDI2SSZrm : AVX512BI<0x6E, MRMSrcMem, (outs FR32X:$dst), (ins i32mem:$src),
"vmovd\t{$src, $dst|$dst, $src}",
[(set FR32X:$dst, (bitconvert (loadi32 addr:$src)))]>,
EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteVecLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1

// Move doubleword from xmm register to r/m32
//
let ExeDomain = SSEPackedInt in {

@@ -3859,6 +3837,13 @@ def VMOVPDI2DIZmr : AVX512BI<0x7E, MRMDestMem, (outs),
EVEX, EVEX_CD8<32, CD8VT1>, Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt

let Predicates = [HasAVX512] in {
def : Pat<(f64 (bitconvert GR64:$src)),
(COPY_TO_REGCLASS (VMOV64toPQIZrr GR64:$src), FR64X)>;
def : Pat<(f32 (bitconvert GR32:$src)),
(COPY_TO_REGCLASS (VMOVDI2PDIZrr GR32:$src), FR32X)>;
}

// Move quadword from xmm1 register to r/m64
//
let ExeDomain = SSEPackedInt in {

@@ -531,13 +531,11 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::MOV32rr, X86::MOV32rm, 0 },
{ X86::MOV64rr, X86::MOV64rm, 0 },
{ X86::MOV64toPQIrr, X86::MOVQI2PQIrm, 0 },
{ X86::MOV64toSDrr, X86::MOV64toSDrm, 0 },
{ X86::MOV8rr, X86::MOV8rm, 0 },
{ X86::MOVAPDrr, X86::MOVAPDrm, TB_ALIGN_16 },
{ X86::MOVAPSrr, X86::MOVAPSrm, TB_ALIGN_16 },
{ X86::MOVDDUPrr, X86::MOVDDUPrm, TB_NO_REVERSE },
{ X86::MOVDI2PDIrr, X86::MOVDI2PDIrm, 0 },
{ X86::MOVDI2SSrr, X86::MOVDI2SSrm, 0 },
{ X86::MOVDQArr, X86::MOVDQArm, TB_ALIGN_16 },
{ X86::MOVDQUrr, X86::MOVDQUrm, 0 },
{ X86::MOVSHDUPrr, X86::MOVSHDUPrm, TB_ALIGN_16 },

@@ -818,8 +816,6 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::VGETMANTPSZrri, X86::VGETMANTPSZrmi, 0 },
{ X86::VMOV64toPQIZrr, X86::VMOVQI2PQIZrm, 0 },
{ X86::VMOV64toPQIrr, X86::VMOVQI2PQIrm, 0 },
{ X86::VMOV64toSDZrr, X86::VMOV64toSDZrm, 0 },
{ X86::VMOV64toSDrr, X86::VMOV64toSDrm, 0 },
{ X86::VMOVAPDYrr, X86::VMOVAPDYrm, TB_ALIGN_32 },
{ X86::VMOVAPDZ128rr, X86::VMOVAPDZ128rm, TB_ALIGN_16 },
{ X86::VMOVAPDZ256rr, X86::VMOVAPDZ256rm, TB_ALIGN_32 },

@@ -837,8 +833,6 @@ static const X86MemoryFoldTableEntry MemoryFoldTable1[] = {
{ X86::VMOVDDUPrr, X86::VMOVDDUPrm, TB_NO_REVERSE },
{ X86::VMOVDI2PDIZrr, X86::VMOVDI2PDIZrm, 0 },
{ X86::VMOVDI2PDIrr, X86::VMOVDI2PDIrm, 0 },
{ X86::VMOVDI2SSZrr, X86::VMOVDI2SSZrm, 0 },
{ X86::VMOVDI2SSrr, X86::VMOVDI2SSrm, 0 },
{ X86::VMOVDQA32Z128rr, X86::VMOVDQA32Z128rm, TB_ALIGN_16 },
{ X86::VMOVDQA32Z256rr, X86::VMOVDQA32Z256rm, TB_ALIGN_32 },
{ X86::VMOVDQA32Zrr, X86::VMOVDQA32Zrm, TB_ALIGN_64 },

@@ -4121,11 +4121,6 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def VMOV64toPQIrm : VRS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}", []>,
VEX, Sched<[WriteVecLoad]>;
let isCodeGenOnly = 1 in
def VMOV64toSDrr : VRS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))]>,
VEX, Sched<[WriteVecMoveFromGpr]>;

def MOVDI2PDIrr : S2I<0x6E, MRMSrcReg, (outs VR128:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",

@@ -4146,37 +4141,8 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0, mayLoad = 1 in
def MOV64toPQIrm : RS2I<0x6E, MRMSrcMem, (outs VR128:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}", []>,
Sched<[WriteVecLoad]>;
let isCodeGenOnly = 1 in
def MOV64toSDrr : RS2I<0x6E, MRMSrcReg, (outs FR64:$dst), (ins GR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert GR64:$src))]>,
Sched<[WriteVecMoveFromGpr]>;
} // ExeDomain = SSEPackedInt

//===---------------------------------------------------------------------===//
// Move Int Doubleword to Single Scalar
//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
def VMOVDI2SSrr : VS2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert GR32:$src))]>,
VEX, Sched<[WriteVecMoveFromGpr]>;

def VMOVDI2SSrm : VS2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
VEX, Sched<[WriteVecLoad]>;
def MOVDI2SSrr : S2I<0x6E, MRMSrcReg, (outs FR32:$dst), (ins GR32:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert GR32:$src))]>,
Sched<[WriteVecMoveFromGpr]>;

def MOVDI2SSrm : S2I<0x6E, MRMSrcMem, (outs FR32:$dst), (ins i32mem:$src),
"movd\t{$src, $dst|$dst, $src}",
[(set FR32:$dst, (bitconvert (loadi32 addr:$src)))]>,
Sched<[WriteVecLoad]>;
} // ExeDomain = SSEPackedInt, isCodeGenOnly = 1

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int to Packed Double Int
//

@@ -4204,6 +4170,21 @@ def MOVPDI2DImr : S2I<0x7E, MRMDestMem, (outs), (ins i32mem:$dst, VR128:$src),
Sched<[WriteVecStore]>;
} // ExeDomain = SSEPackedInt

let Predicates = [UseAVX] in {
def : Pat<(f64 (bitconvert GR64:$src)),
(COPY_TO_REGCLASS (VMOV64toPQIrr GR64:$src), FR64)>;
def : Pat<(f32 (bitconvert GR32:$src)),
(COPY_TO_REGCLASS (VMOVDI2PDIrr GR32:$src), FR32)>;
}

let Predicates = [UseSSE2] in
def : Pat<(f64 (bitconvert GR64:$src)),
(COPY_TO_REGCLASS (MOV64toPQIrr GR64:$src), FR64)>;

let Predicates = [UseSSE1] in
def : Pat<(f32 (bitconvert GR32:$src)),
(COPY_TO_REGCLASS (MOVDI2PDIrr GR32:$src), FR32)>;

//===---------------------------------------------------------------------===//
// Move Packed Doubleword Int first element to Doubleword Int
//

@@ -4237,10 +4218,6 @@ def MOVPQIto64mr : RS2I<0x7E, MRMDestMem, (outs), (ins i64mem:$dst, VR128:$src),
//
let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
let Predicates = [UseAVX] in
def VMOV64toSDrm : VS2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
VEX, Sched<[WriteVecLoad]>;
def VMOVSDto64rr : VRS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))]>,

@@ -4250,10 +4227,6 @@ let ExeDomain = SSEPackedInt, isCodeGenOnly = 1 in {
[(store (i64 (bitconvert FR64:$src)), addr:$dst)]>,
VEX, Sched<[WriteVecStore]>;

def MOV64toSDrm : S2SI<0x7E, MRMSrcMem, (outs FR64:$dst), (ins i64mem:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set FR64:$dst, (bitconvert (loadi64 addr:$src)))]>,
Sched<[WriteVecLoad]>;
def MOVSDto64rr : RS2I<0x7E, MRMDestReg, (outs GR64:$dst), (ins FR64:$src),
"movq\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bitconvert FR64:$src))]>,

@@ -2216,12 +2216,6 @@ body: |
$edi = VCVTTSS2SIZrr $xmm0
; CHECK: $edi = VCVTTSS2SIrr_Int $xmm0
$edi = VCVTTSS2SIZrr_Int $xmm0
; CHECK: $xmm0 = VMOV64toSDrr $rdi
$xmm0 = VMOV64toSDZrr $rdi
; CHECK: $xmm0 = VMOVDI2SSrm $rip, $noreg, $noreg, $noreg, $noreg
$xmm0 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm0 = VMOVDI2SSrr $eax
$xmm0 = VMOVDI2SSZrr $eax
; CHECK: VMOVSDmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
VMOVSDZmr $rdi, $xmm0, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm0 = VMOVSDrm $rip, $noreg, $noreg, $noreg, $noreg

@@ -2250,8 +2244,6 @@ body: |
$xmm0 = VMOV64toPQIZrr $rdi
; CHECK: $xmm0 = VMOV64toPQIrm $rdi, $noreg, $noreg, $noreg, $noreg
$xmm0 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm0 = VMOV64toSDrr $rdi
$xmm0 = VMOV64toSDZrr $rdi
; CHECK: $xmm0 = VMOVDI2PDIrm $rip, $noreg, $noreg, $noreg, $noreg
$xmm0 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm0 = VMOVDI2PDIrr $edi

@@ -4536,12 +4528,6 @@ body: |
$edi = VCVTTSS2SIZrr $xmm16
; CHECK: $edi = VCVTTSS2SIZrr_Int $xmm16
$edi = VCVTTSS2SIZrr_Int $xmm16
; CHECK: $xmm16 = VMOV64toSDZrr $rdi
$xmm16 = VMOV64toSDZrr $rdi
; CHECK: $xmm16 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
$xmm16 = VMOVDI2SSZrm $rip, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm16 = VMOVDI2SSZrr $eax
$xmm16 = VMOVDI2SSZrr $eax
; CHECK: VMOVSDZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
VMOVSDZmr $rdi, $xmm16, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm16 = VMOVSDZrm $rip, $noreg, $noreg, $noreg, $noreg

@@ -4570,8 +4556,6 @@ body: |
$xmm16 = VMOV64toPQIZrr $rdi
; CHECK: $xmm16 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
$xmm16 = VMOV64toPQIZrm $rdi, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm16 = VMOV64toSDZrr $rdi
$xmm16 = VMOV64toSDZrr $rdi
; CHECK: $xmm16 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
$xmm16 = VMOVDI2PDIZrm $rip, $noreg, $noreg, $noreg, $noreg
; CHECK: $xmm16 = VMOVDI2PDIZrr $edi

@@ -5,10 +5,9 @@
define double @doo(double %x) nounwind {
; CHECK-LABEL: doo:
; CHECK: ## %bb.0:
; CHECK-NEXT: movq %xmm0, %rax
; CHECK-NEXT: movabsq $-9223372036854775808, %rcx ## imm = 0x8000000000000000
; CHECK-NEXT: xorq %rax, %rcx
; CHECK-NEXT: movq %rcx, %xmm0
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; CHECK-NEXT: subsd %xmm0, %xmm1
; CHECK-NEXT: movapd %xmm1, %xmm0
; CHECK-NEXT: retq
;
; SSE2-LABEL: doo:

@@ -31,9 +30,9 @@ define double @doo(double %x) nounwind {
define float @foo(float %x) nounwind {
; CHECK-LABEL: foo:
; CHECK: ## %bb.0:
; CHECK-NEXT: movd %xmm0, %eax
; CHECK-NEXT: xorl $2147483648, %eax ## imm = 0x80000000
; CHECK-NEXT: movd %eax, %xmm0
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: subss %xmm0, %xmm1
; CHECK-NEXT: movaps %xmm1, %xmm0
; CHECK-NEXT: retq
;
; SSE2-LABEL: foo:

@@ -1,40 +0,0 @@
# RUN: llc -mtriple=x86_64-- -run-pass=peephole-opt %s -o - | FileCheck %s
--- |
define void @func() { ret void }
...
---
# Check that instructions with MI.isBitcast() are only replaced by COPY if there
# are no SUBREG_TO_REG users.
# CHECK-LABEL: name: func
name: func
registers:
- { id: 0, class: gr32 }
- { id: 1, class: fr32 }
- { id: 2, class: gr32 }

- { id: 3, class: gr32 }
- { id: 4, class: fr32 }
- { id: 5, class: gr32 }
- { id: 6, class: gr64 }

body: |
bb.0:
; CHECK: %1:fr32 = VMOVDI2SSrr %0
; CHECK: %7:gr32 = COPY %0
; CHECK: NOOP implicit %7
%0 = MOV32ri 42
%1 = VMOVDI2SSrr %0
%2 = MOVSS2DIrr %1
NOOP implicit %2

; CHECK: %4:fr32 = VMOVDI2SSrr %3
; CHECK-NOT: COPY
; CHECK: %5:gr32 = MOVSS2DIrr %4
; CHECK: %6:gr64 = SUBREG_TO_REG %5, 0
; CHECK: NOOP implicit %6
%3 = MOV32ri 42
%4 = VMOVDI2SSrr %3
%5 = MOVSS2DIrr %4
%6 = SUBREG_TO_REG %5, 0, %subreg.sub_32bit
NOOP implicit %6
...

@@ -0,0 +1,27 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx2 | FileCheck %s

define void @foo(double %arg) {
; CHECK-LABEL: foo:
; CHECK: ## %bb.0: ## %bb
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: vmovd %eax, %xmm0
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vmovq %xmm0, %rax
; CHECK-NEXT: movl %eax, (%rax)
; CHECK-NEXT: vmovlps %xmm1, (%rax)
; CHECK-NEXT: retq
bb:
%tmp = bitcast double %arg to i64
%tmp1 = trunc i64 %tmp to i32
%tmp2 = bitcast i32 %tmp1 to float
%tmp3 = insertelement <4 x float> zeroinitializer, float %tmp2, i32 2
%tmp4 = bitcast <4 x float> %tmp3 to <2 x double>
%tmp5 = extractelement <2 x double> %tmp4, i32 0
%tmp6 = extractelement <2 x double> %tmp4, i32 1
%tmp7 = bitcast double %tmp6 to i64
%tmp8 = trunc i64 %tmp7 to i32
store i32 %tmp8, i32* undef, align 4
store double %tmp5, double* undef, align 16
ret void
}