[X86] Prefer blendi over movss/sd when avx512 is enabled unless optimizing for size.
AVX512 doesn't have an immediate-controlled blend instruction, but blend throughput is still better than movss/sd on SKX. This commit changes AVX512 to use the AVX blend instructions instead of MOVSS/MOVSD. This constrains register allocation, since the VEX-encoded blends can't use XMM16-31, but hopefully the increased throughput and reduced port 5 pressure make up for that.

llvm-svn: 337083
commit f0b164415c
parent 70993d37e8
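To illustrate the effect, here is the shape of code this change targets, distilled from the insert_f32 test updated below; the before/after assembly lines are taken directly from that test's diff:

  define <4 x float> @insert_f32(float %a0, <4 x float> %a1) {
    %1 = insertelement <4 x float> %a1, float %a0, i32 0   ; blend %a0 into lane 0
    ret <4 x float> %1
  }

  ; AVX512 before: vmovss   {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
  ; AVX512 after:  vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]

The size tradeoff is visible in the encodings already present in the tests below: vmovss %xmm0, %xmm1, %xmm0 encodes in 4 bytes ([0xc5,0xf2,0x10,0xc0]) while vblendps $1, %xmm0, %xmm1, %xmm0 takes 6 ([0xc4,0xe3,0x71,0x0c,0xc0,0x01]), which is why the moves remain preferred under OptForSize.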
@@ -3936,6 +3936,7 @@ def : InstAlias<"vmovd\t{$src, $dst|$dst, $src}",

multiclass avx512_move_scalar<string asm, SDNode OpNode,
                              X86VectorVTInfo _> {
  let Predicates = [HasAVX512, OptForSize] in
  def rr : AVX512PI<0x10, MRMSrcReg, (outs _.RC:$dst),
             (ins _.RC:$src1, _.RC:$src2),
             !strconcat(asm, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),

@@ -4324,7 +4325,7 @@ def : InstAlias<"vmovsd.s\t{$src2, $src1, $dst {${mask}} {z}|"#
                (VMOVSDZrrkz_REV VR128X:$dst, VK1WM:$mask,
                                 VR128X:$src1, VR128X:$src2), 0>;

let Predicates = [HasAVX512] in {
let Predicates = [HasAVX512, OptForSize] in {
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
            (VMOVSSZrr (v4f32 (AVX512_128_SET0)), VR128X:$src)>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),

@@ -4339,6 +4340,17 @@ let Predicates = [HasAVX512] in {
            (SUBREG_TO_REG (i32 0),
             (VMOVSSZrr (v4i32 (AVX512_128_SET0)),
              (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
              (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
             (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
}

let Predicates = [HasAVX512] in {
  def : Pat<(v16f32 (X86vzmovl (v16f32 VR512:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSSZrr (v4f32 (AVX512_128_SET0)),

@@ -4405,18 +4417,11 @@ let Predicates = [HasAVX512] in {
            (SUBREG_TO_REG (i64 0), (VMOVQI2PQIZrm addr:$src), sub_xmm)>;

  // Move low f64 and clear high bits.
  def : Pat<(v4f64 (X86vzmovl (v4f64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
              (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8f64 (X86vzmovl (v8f64 VR512:$src))),
            (SUBREG_TO_REG (i32 0),
             (VMOVSDZrr (v2f64 (AVX512_128_SET0)),
              (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)), sub_xmm)>;

  def : Pat<(v4i64 (X86vzmovl (v4i64 VR256X:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
             (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)), sub_xmm)>;
  def : Pat<(v8i64 (X86vzmovl (v8i64 VR512:$src))),
            (SUBREG_TO_REG (i32 0), (VMOVSDZrr (v2i64 (AVX512_128_SET0)),
             (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)), sub_xmm)>;

@@ -4425,7 +4430,9 @@ let Predicates = [HasAVX512] in {
  def : Pat<(store (f32 (extractelt (v4f32 VR128X:$src), (iPTR 0))),
                   addr:$dst),
            (VMOVSSZmr addr:$dst, (COPY_TO_REGCLASS (v4f32 VR128X:$src), FR32X))>;
}

let Predicates = [HasAVX512, OptForSize] in {
  // Shuffle with VMOVSS
  def : Pat<(v4i32 (X86Movss VR128X:$src1, VR128X:$src2)),
            (VMOVSSZrr (v4i32 VR128X:$src1), VR128X:$src2)>;
@@ -6394,8 +6394,7 @@ let Predicates = [HasAVX2] in {
// Prefer a movss or movsd over a blendps when optimizing for size. These were
// changed to use blends because blends have better throughput on Sandy Bridge
// and Haswell, but movs[s/d] are 1-2 byte shorter instructions.
let Predicates = [UseAVX] in {
let Predicates = [UseAVX, OptForSpeed] in {
let Predicates = [HasAVX, OptForSpeed] in {
  def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
            (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
  def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),

@@ -6410,7 +6409,6 @@ let Predicates = [UseAVX] in {
            (VBLENDPDrri VR128:$src1, VR128:$src2, (i8 1))>;
  def : Pat<(v2i64 (X86Movsd VR128:$src1, VR128:$src2)),
            (VPBLENDWrri VR128:$src1, VR128:$src2, (i8 0xf))>;
}

// Move low f32 and clear high bits.
def : Pat<(v8f32 (X86vzmovl (v8f32 VR256:$src))),
@@ -298,17 +298,11 @@ declare <2 x i64> @llvm.x86.sse2.psrl.dq(<2 x i64>, i32) nounwind readnone

define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
; AVX-LABEL: test_x86_sse41_blendpd:
; AVX: # %bb.0:
; AVX-NEXT: vblendps $3, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
; AVX-NEXT: # xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: test_x86_sse41_blendpd:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf3,0x10,0xc0]
; AVX512VL-NEXT: # xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; CHECK-LABEL: test_x86_sse41_blendpd:
; CHECK: # %bb.0:
; CHECK-NEXT: vblendps $3, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
; CHECK-NEXT: # xmm0 = xmm0[0,1],xmm1[2,3]
; CHECK-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i8 2) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}
@@ -24,7 +24,7 @@ define <8 x double> @test2(<8 x double> %x, double* %br, double %y) nounwind {
; CHECK-NEXT: vmovhpd {{.*#+}} xmm2 = xmm0[0],mem[0]
; CHECK-NEXT: vinsertf32x4 $0, %xmm2, %zmm0, %zmm2
; CHECK-NEXT: vextractf32x4 $3, %zmm0, %xmm0
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vinsertf32x4 $3, %xmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
  %rrr = load double, double* %br
@@ -1823,7 +1823,7 @@ define <2 x double> @test_mm_cvtu64_sd(<2 x double> %__A, i64 %__B) {
; X86-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; X86-NEXT: vsubpd {{\.LCPI.*}}, %xmm1, %xmm1
; X86-NEXT: vhaddpd %xmm1, %xmm1, %xmm1
; X86-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X86-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; X86-NEXT: retl
;
; X64-LABEL: test_mm_cvtu64_sd:

@@ -1873,7 +1873,7 @@ define <4 x float> @test_mm_cvtu64_ss(<4 x float> %__A, i64 %__B) {
; X86-NEXT: fadds {{\.LCPI.*}}(,%ecx,4)
; X86-NEXT: fstps {{[0-9]+}}(%esp)
; X86-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X86-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; X86-NEXT: movl %ebp, %esp
; X86-NEXT: popl %ebp
; X86-NEXT: .cfi_def_cfa %esp, 4
@@ -2852,7 +2852,7 @@ define <4 x float> @test_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec) {
; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mask3:
; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,1,2]
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq

@@ -2863,7 +2863,7 @@ define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec,
; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask3:
; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm3
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm3[0],xmm0[1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqps %xmm3, %xmm2, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[3,3,1,2]

@@ -2880,7 +2880,7 @@ define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec
; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask3:
; CHECK: # %bb.0:
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqps %xmm2, %xmm1, %k1
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3,1,2]

@@ -3801,7 +3801,7 @@ define <2 x double> @test_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp)
; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm0
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1
; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
  %vec = load <4 x double>, <4 x double>* %vp

@@ -3813,7 +3813,7 @@ define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double
; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm2
; CHECK-NEXT: vextractf128 $1, %ymm2, %xmm3
; CHECK-NEXT: vmovsd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm2 = xmm3[0],xmm2[1]
; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3
; CHECK-NEXT: vcmpeqpd %xmm3, %xmm1, %k1
; CHECK-NEXT: vmovapd %xmm2, %xmm0 {%k1}

@@ -3831,7 +3831,7 @@ define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask0(<4 x doub
; CHECK: # %bb.0:
; CHECK-NEXT: vmovapd (%rdi), %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; CHECK-NEXT: vblendpd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vcmpeqpd %xmm2, %xmm0, %k1
; CHECK-NEXT: vmovapd %xmm1, %xmm0 {%k1} {z}
@@ -25,7 +25,7 @@ define <2 x double> @insert_f64(double %a0, <2 x double> %a1) {
;
; AVX512-LABEL: insert_f64:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX512-NEXT: retq
  %1 = insertelement <2 x double> %a1, double %a0, i32 0
  ret <2 x double> %1

@@ -50,7 +50,7 @@ define <4 x float> @insert_f32(float %a0, <4 x float> %a1) {
;
; AVX512-LABEL: insert_f32:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: retq
  %1 = insertelement <4 x float> %a1, float %a0, i32 0
  ret <4 x float> %1
@@ -134,21 +134,13 @@ entry:

; This should not be matched to fmsubadd because the mul is on the wrong side of the fsub.
define <2 x double> @mul_subadd_bad_commute(<2 x double> %A, <2 x double> %B, <2 x double> %C) #0 {
; FMA3_256-LABEL: mul_subadd_bad_commute:
; FMA3_256: # %bb.0: # %entry
; FMA3_256-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; FMA3_256-NEXT: vsubpd %xmm0, %xmm2, %xmm1
; FMA3_256-NEXT: vaddpd %xmm2, %xmm0, %xmm0
; FMA3_256-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; FMA3_256-NEXT: retq
;
; FMA3_512-LABEL: mul_subadd_bad_commute:
; FMA3_512: # %bb.0: # %entry
; FMA3_512-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; FMA3_512-NEXT: vsubpd %xmm0, %xmm2, %xmm1
; FMA3_512-NEXT: vaddpd %xmm2, %xmm0, %xmm0
; FMA3_512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; FMA3_512-NEXT: retq
; FMA3-LABEL: mul_subadd_bad_commute:
; FMA3: # %bb.0: # %entry
; FMA3-NEXT: vmulpd %xmm1, %xmm0, %xmm0
; FMA3-NEXT: vsubpd %xmm0, %xmm2, %xmm1
; FMA3-NEXT: vaddpd %xmm2, %xmm0, %xmm0
; FMA3-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; FMA3-NEXT: retq
;
; FMA4-LABEL: mul_subadd_bad_commute:
; FMA4: # %bb.0: # %entry
@@ -162,19 +162,12 @@ define <4 x float> @test_mm_cmpge_ss(<4 x float> %a0, <4 x float> %a1) nounwind
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpge_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpless %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x02]
; AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpge_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpless %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x02]
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpge_ss:
; AVX: # %bb.0:
; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x02]
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a1, <4 x float> %a0, i8 2)
  %res = shufflevector <4 x float> %a0, <4 x float> %cmp, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x float> %res

@@ -211,19 +204,12 @@ define <4 x float> @test_mm_cmpgt_ss(<4 x float> %a0, <4 x float> %a1) nounwind
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpgt_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltss %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x01]
; AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpgt_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltss %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x01]
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpgt_ss:
; AVX: # %bb.0:
; AVX-NEXT: vcmpltss %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x01]
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a1, <4 x float> %a0, i8 1)
  %res = shufflevector <4 x float> %a0, <4 x float> %cmp, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x float> %res

@@ -368,19 +354,12 @@ define <4 x float> @test_mm_cmpnge_ss(<4 x float> %a0, <4 x float> %a1) nounwind
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpnge_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpnless %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x06]
; AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpnge_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnless %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x06]
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpnge_ss:
; AVX: # %bb.0:
; AVX-NEXT: vcmpnless %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x06]
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a1, <4 x float> %a0, i8 6)
  %res = shufflevector <4 x float> %a0, <4 x float> %cmp, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x float> %res

@@ -417,19 +396,12 @@ define <4 x float> @test_mm_cmpngt_ss(<4 x float> %a0, <4 x float> %a1) nounwind
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpngt_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpnltss %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x05]
; AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpngt_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnltss %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x05]
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpngt_ss:
; AVX: # %bb.0:
; AVX-NEXT: vcmpnltss %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf2,0xc2,0xc8,0x05]
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <4 x float> @llvm.x86.sse.cmp.ss(<4 x float> %a1, <4 x float> %a0, i8 5)
  %res = shufflevector <4 x float> %a0, <4 x float> %cmp, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x float> %res
@@ -1435,8 +1407,8 @@ define <4 x float> @test_mm_loadl_pi(<4 x float> %a0, x86_mmx* %a1) {
; X86-AVX512-NEXT: movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X86-AVX512-NEXT: vmovsd (%eax), %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0x08]
; X86-AVX512-NEXT: # xmm1 = mem[0],zero
; X86-AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; X86-AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1]
; X86-AVX512-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X86-AVX512-NEXT: # xmm0 = xmm1[0,1],xmm0[2,3]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;
; X64-SSE-LABEL: test_mm_loadl_pi:

@@ -1646,17 +1618,11 @@ define <4 x float> @test_mm_move_ss(<4 x float> %a0, <4 x float> %a1) {
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_move_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_move_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_move_ss:
; AVX: # %bb.0:
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %res = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x float> %res
}

@@ -2278,8 +2244,8 @@ define <4 x float> @test_mm_set_ss(float %a0) nounwind {
; X86-AVX512: # %bb.0:
; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x44,0x24,0x04]
; X86-AVX512-NEXT: # xmm0 = mem[0],zero,zero,zero
; X86-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
; X86-AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf2,0x10,0xc0]
; X86-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf0,0x57,0xc9]
; X86-AVX512-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x01]
; X86-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[1,2,3]
; X86-AVX512-NEXT: retl # encoding: [0xc3]
;

@@ -2291,19 +2257,12 @@ define <4 x float> @test_mm_set_ss(float %a0) nounwind {
; X64-SSE-NEXT: movaps %xmm1, %xmm0 # encoding: [0x0f,0x28,0xc1]
; X64-SSE-NEXT: retq # encoding: [0xc3]
;
; X64-AVX1-LABEL: test_mm_set_ss:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf0,0x57,0xc9]
; X64-AVX1-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x01]
; X64-AVX1-NEXT: # xmm0 = xmm0[0],xmm1[1,2,3]
; X64-AVX1-NEXT: retq # encoding: [0xc3]
;
; X64-AVX512-LABEL: test_mm_set_ss:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1 # EVEX TO VEX Compression encoding: [0xc5,0xf0,0x57,0xc9]
; X64-AVX512-NEXT: vmovss %xmm0, %xmm1, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf2,0x10,0xc0]
; X64-AVX512-NEXT: # xmm0 = xmm0[0],xmm1[1,2,3]
; X64-AVX512-NEXT: retq # encoding: [0xc3]
; X64-AVX-LABEL: test_mm_set_ss:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1 # encoding: [0xc5,0xf0,0x57,0xc9]
; X64-AVX-NEXT: vblendps $1, %xmm0, %xmm1, %xmm0 # encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x01]
; X64-AVX-NEXT: # xmm0 = xmm0[0],xmm1[1,2,3]
; X64-AVX-NEXT: retq # encoding: [0xc3]
  %res0 = insertelement <4 x float> undef, float %a0, i32 0
  %res1 = insertelement <4 x float> %res0, float 0.0, i32 1
  %res2 = insertelement <4 x float> %res1, float 0.0, i32 2
@@ -667,17 +667,11 @@ define <4 x float> @insert_test_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_add_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_add_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_add_ss:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <4 x float> %a, %b
  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2

@@ -699,17 +693,11 @@ define <4 x float> @insert_test_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_sub_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_sub_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_sub_ss:
; AVX: # %bb.0:
; AVX-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <4 x float> %a, %b
  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2

@@ -728,17 +716,11 @@ define <4 x float> @insert_test_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_mul_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_mul_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_mul_ss:
; AVX: # %bb.0:
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <4 x float> %a, %b
  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2

@@ -760,17 +742,11 @@ define <4 x float> @insert_test_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_div_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_div_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_div_ss:
; AVX: # %bb.0:
; AVX-NEXT: vdivps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <4 x float> %a, %b
  %2 = shufflevector <4 x float> %1, <4 x float> %a, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2
@@ -789,17 +765,11 @@ define <2 x double> @insert_test_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_add_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_add_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_add_sd:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <2 x double> %a, %b
  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2

@@ -821,17 +791,11 @@ define <2 x double> @insert_test_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_sub_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_sub_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_sub_sd:
; AVX: # %bb.0:
; AVX-NEXT: vsubpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <2 x double> %a, %b
  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2

@@ -850,17 +814,11 @@ define <2 x double> @insert_test_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_mul_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_mul_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_mul_sd:
; AVX: # %bb.0:
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <2 x double> %a, %b
  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2

@@ -882,17 +840,11 @@ define <2 x double> @insert_test_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test_div_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test_div_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test_div_sd:
; AVX: # %bb.0:
; AVX-NEXT: vdivpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <2 x double> %a, %b
  %2 = shufflevector <2 x double> %1, <2 x double> %a, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2
@@ -912,17 +864,11 @@ define <4 x float> @insert_test2_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_add_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_add_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_add_ss:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <4 x float> %b, %a
  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2

@@ -945,17 +891,11 @@ define <4 x float> @insert_test2_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_sub_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_sub_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_sub_ss:
; AVX: # %bb.0:
; AVX-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <4 x float> %b, %a
  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2

@@ -975,17 +915,11 @@ define <4 x float> @insert_test2_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_mul_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_mul_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_mul_ss:
; AVX: # %bb.0:
; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <4 x float> %b, %a
  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2

@@ -1008,17 +942,11 @@ define <4 x float> @insert_test2_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_div_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_div_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_div_ss:
; AVX: # %bb.0:
; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <4 x float> %b, %a
  %2 = shufflevector <4 x float> %1, <4 x float> %b, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %2
@@ -1038,17 +966,11 @@ define <2 x double> @insert_test2_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_add_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_add_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_add_sd:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <2 x double> %b, %a
  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2

@@ -1071,17 +993,11 @@ define <2 x double> @insert_test2_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_sub_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_sub_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_sub_sd:
; AVX: # %bb.0:
; AVX-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <2 x double> %b, %a
  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2

@@ -1101,17 +1017,11 @@ define <2 x double> @insert_test2_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_mul_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_mul_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_mul_sd:
; AVX: # %bb.0:
; AVX-NEXT: vmulpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <2 x double> %b, %a
  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2

@@ -1134,17 +1044,11 @@ define <2 x double> @insert_test2_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test2_div_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test2_div_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test2_div_sd:
; AVX: # %bb.0:
; AVX-NEXT: vdivpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <2 x double> %b, %a
  %2 = shufflevector <2 x double> %1, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %2
@@ -1163,17 +1067,11 @@ define <4 x float> @insert_test3_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_add_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_add_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_add_ss:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <4 x float> %a, %b
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
  ret <4 x float> %2

@@ -1195,17 +1093,11 @@ define <4 x float> @insert_test3_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_sub_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_sub_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_sub_ss:
; AVX: # %bb.0:
; AVX-NEXT: vsubps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <4 x float> %a, %b
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
  ret <4 x float> %2

@@ -1224,17 +1116,11 @@ define <4 x float> @insert_test3_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_mul_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_mul_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_mul_ss:
; AVX: # %bb.0:
; AVX-NEXT: vmulps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <4 x float> %a, %b
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
  ret <4 x float> %2

@@ -1256,17 +1142,11 @@ define <4 x float> @insert_test3_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_div_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivps %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_div_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivps %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_div_ss:
; AVX: # %bb.0:
; AVX-NEXT: vdivps %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <4 x float> %a, %b
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %a, <4 x float> %1
  ret <4 x float> %2
@@ -1285,17 +1165,11 @@ define <2 x double> @insert_test3_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_add_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_add_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_add_sd:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <2 x double> %a, %b
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
  ret <2 x double> %2

@@ -1317,17 +1191,11 @@ define <2 x double> @insert_test3_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_sub_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_sub_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_sub_sd:
; AVX: # %bb.0:
; AVX-NEXT: vsubpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <2 x double> %a, %b
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
  ret <2 x double> %2

@@ -1346,17 +1214,11 @@ define <2 x double> @insert_test3_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_mul_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_mul_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_mul_sd:
; AVX: # %bb.0:
; AVX-NEXT: vmulpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <2 x double> %a, %b
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
  ret <2 x double> %2

@@ -1378,17 +1240,11 @@ define <2 x double> @insert_test3_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test3_div_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivpd %xmm1, %xmm0, %xmm1
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test3_div_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivpd %xmm1, %xmm0, %xmm1
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test3_div_sd:
; AVX: # %bb.0:
; AVX-NEXT: vdivpd %xmm1, %xmm0, %xmm1
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <2 x double> %a, %b
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %a, <2 x double> %1
  ret <2 x double> %2
@@ -1408,17 +1264,11 @@ define <4 x float> @insert_test4_add_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_add_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_add_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_add_ss:
; AVX: # %bb.0:
; AVX-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <4 x float> %b, %a
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
  ret <4 x float> %2

@@ -1441,17 +1291,11 @@ define <4 x float> @insert_test4_sub_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_sub_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_sub_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_sub_ss:
; AVX: # %bb.0:
; AVX-NEXT: vsubps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <4 x float> %b, %a
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
  ret <4 x float> %2

@@ -1471,17 +1315,11 @@ define <4 x float> @insert_test4_mul_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_mul_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_mul_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_mul_ss:
; AVX: # %bb.0:
; AVX-NEXT: vmulps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <4 x float> %b, %a
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
  ret <4 x float> %2

@@ -1504,17 +1342,11 @@ define <4 x float> @insert_test4_div_ss(<4 x float> %a, <4 x float> %b) {
; SSE41-NEXT: movaps %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_div_ss:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_div_ss:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_div_ss:
; AVX: # %bb.0:
; AVX-NEXT: vdivps %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <4 x float> %b, %a
  %2 = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x float> %b, <4 x float> %1
  ret <4 x float> %2
@@ -1534,17 +1366,11 @@ define <2 x double> @insert_test4_add_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_add_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_add_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_add_sd:
; AVX: # %bb.0:
; AVX-NEXT: vaddpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fadd <2 x double> %b, %a
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
  ret <2 x double> %2

@@ -1567,17 +1393,11 @@ define <2 x double> @insert_test4_sub_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_sub_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_sub_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_sub_sd:
; AVX: # %bb.0:
; AVX-NEXT: vsubpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fsub <2 x double> %b, %a
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
  ret <2 x double> %2

@@ -1597,17 +1417,11 @@ define <2 x double> @insert_test4_mul_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_mul_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vmulpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_mul_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vmulpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_mul_sd:
; AVX: # %bb.0:
; AVX-NEXT: vmulpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fmul <2 x double> %b, %a
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
  ret <2 x double> %2

@@ -1630,17 +1444,11 @@ define <2 x double> @insert_test4_div_sd(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: movapd %xmm2, %xmm0
; SSE41-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: insert_test4_div_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vdivpd %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: insert_test4_div_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vdivpd %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: insert_test4_div_sd:
; AVX: # %bb.0:
; AVX-NEXT: vdivpd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX-NEXT: ret{{[l|q]}}
  %1 = fdiv <2 x double> %b, %a
  %2 = select <2 x i1> <i1 false, i1 true>, <2 x double> %b, <2 x double> %1
  ret <2 x double> %2
@ -639,19 +639,12 @@ define <2 x double> @test_mm_cmpge_sd(<2 x double> %a0, <2 x double> %a1) nounwi
|
|||
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
|
||||
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
|
||||
;
|
||||
; AVX1-LABEL: test_mm_cmpge_sd:
|
||||
; AVX1: # %bb.0:
|
||||
; AVX1-NEXT: vcmplesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x02]
|
||||
; AVX1-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
|
||||
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1]
|
||||
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
|
||||
;
|
||||
; AVX512-LABEL: test_mm_cmpge_sd:
|
||||
; AVX512: # %bb.0:
|
||||
; AVX512-NEXT: vcmplesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x02]
|
||||
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
|
||||
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1]
|
||||
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
|
||||
; AVX-LABEL: test_mm_cmpge_sd:
|
||||
; AVX: # %bb.0:
|
||||
; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x02]
|
||||
; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
|
||||
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
|
||||
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
|
||||
%cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 2)
|
||||
%ext0 = extractelement <2 x double> %cmp, i32 0
|
||||
%ins0 = insertelement <2 x double> undef, double %ext0, i32 0
|
||||
|
@@ -763,19 +756,12 @@ define <2 x double> @test_mm_cmpgt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpgt_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x01]
; AVX1-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpgt_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x01]
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpgt_sd:
; AVX: # %bb.0:
; AVX-NEXT: vcmpltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x01]
; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 1)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0

@@ -998,19 +984,12 @@ define <2 x double> @test_mm_cmpnge_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpnge_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpnlesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x06]
; AVX1-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpnge_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnlesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x06]
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpnge_sd:
; AVX: # %bb.0:
; AVX-NEXT: vcmpnlesd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x06]
; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 6)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0

@@ -1050,19 +1029,12 @@ define <2 x double> @test_mm_cmpngt_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_cmpngt_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x05]
; AVX1-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX1-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_cmpngt_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x05]
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_cmpngt_sd:
; AVX: # %bb.0:
; AVX-NEXT: vcmpnltsd %xmm0, %xmm1, %xmm1 # encoding: [0xc5,0xf3,0xc2,0xc8,0x05]
; AVX-NEXT: vblendpd $1, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0d,0xc1,0x01]
; AVX-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 5)
  %ext0 = extractelement <2 x double> %cmp, i32 0
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0

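(Aside, not part of the commit: the four cmp tests above share one shape, sketched below with a hypothetical function name and a slightly simplified tail. cmpge/cmpgt are emitted as cmple/cmplt with swapped operands, and lane 0 of the mask is merged back over %a0; that merge is the vmovsd these hunks turn into vblendpd.)

declare <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double>, <2 x double>, i8)

define <2 x double> @sketch_cmpge_sd(<2 x double> %a0, <2 x double> %a1) {
  ; predicate 2 is "le"; swapping operands makes cmple(a1, a0) == cmpge(a0, a1)
  %cmp = call <2 x double> @llvm.x86.sse2.cmp.sd(<2 x double> %a1, <2 x double> %a0, i8 2)
  %lo = extractelement <2 x double> %cmp, i32 0
  %res = insertelement <2 x double> %a0, double %lo, i32 0
  ret <2 x double> %res
}
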
@@ -2597,17 +2569,11 @@ define <2 x double> @test_mm_move_sd(<2 x double> %a0, <2 x double> %a1) nounwind {
; SSE-NEXT: # xmm0 = xmm1[0],xmm0[1]
; SSE-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX1-LABEL: test_mm_move_sd:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; AVX1-NEXT: # xmm0 = xmm1[0,1],xmm0[2,3]
; AVX1-NEXT: ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512-LABEL: test_mm_move_sd:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; AVX512-NEXT: # xmm0 = xmm1[0],xmm0[1]
; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
; AVX-LABEL: test_mm_move_sd:
; AVX: # %bb.0:
; AVX-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 # encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; AVX-NEXT: # xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT: ret{{[l|q]}} # encoding: [0xc3]
  %ext0 = extractelement <2 x double> %a1, i32 0
  %res0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ext1 = extractelement <2 x double> %a0, i32 1

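(Aside, not part of the commit: `_mm_move_sd` reduces to a two-lane shuffle, sketched here with a hypothetical name. Lane 0 comes from the second operand, lane 1 from the first; AVX1 already selected vblendps for it, and AVX512 now does too.)

define <2 x double> @sketch_move_sd(<2 x double> %a0, <2 x double> %a1) {
  ; index 2 is %a1[0], index 1 is %a0[1]
  %r = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 2, i32 1>
  ret <2 x double> %r
}
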
@@ -776,8 +776,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, <4 x float>* %p1) {
; X86-AVX512-NEXT: vmovss (%eax), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x08]
; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
; X86-AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1]
; X86-AVX512-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;
; X64-SSE-LABEL: test_x86_sse2_cvtss2sd_load:

@@ -803,8 +803,8 @@ define <2 x double> @test_x86_sse2_cvtss2sd_load(<2 x double> %a0, <4 x float>* %p1) {
; X64-AVX512-NEXT: vmovss (%rdi), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x0f]
; X64-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X64-AVX512-NEXT: vcvtss2sd %xmm1, %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xf2,0x5a,0xc9]
; X64-AVX512-NEXT: vmovsd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfb,0x10,0xc1]
; X64-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1]
; X64-AVX512-NEXT: vblendps $3, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x03]
; X64-AVX512-NEXT: ## xmm0 = xmm1[0,1],xmm0[2,3]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
  %a1 = load <4 x float>, <4 x float>* %p1
  %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1) ; <<2 x double>> [#uses=1]

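(Aside, not part of the commit: a sketch of the cvtss2sd pattern above, with a hypothetical function name and typed pointers as in the original test. The converted scalar lands in lane 0 while lane 1 of %a0 is preserved, so the tail is again a movsd-shaped merge -- now a vblendps $3, whose two f32 lanes cover the same low 64 bits.)

declare <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double>, <4 x float>)

define <2 x double> @sketch_cvtss2sd_load(<2 x double> %a0, <4 x float>* %p1) {
  %a1 = load <4 x float>, <4 x float>* %p1
  %res = call <2 x double> @llvm.x86.sse2.cvtss2sd(<2 x double> %a0, <4 x float> %a1)
  ret <2 x double> %res
}
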
@@ -34,18 +34,11 @@ define void @test1(<2 x double>* %r, <2 x double>* %A, double %B) nounwind {
; X64-SSE-NEXT: movapd %xmm1, (%rdi)
; X64-SSE-NEXT: retq
;
; X64-AVX1-LABEL: test1:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; X64-AVX1-NEXT: vmovaps %xmm0, (%rdi)
; X64-AVX1-NEXT: retq
;
; X64-AVX512-LABEL: test1:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vmovapd (%rsi), %xmm1
; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X64-AVX512-NEXT: vmovapd %xmm0, (%rdi)
; X64-AVX512-NEXT: retq
; X64-AVX-LABEL: test1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; X64-AVX-NEXT: vmovaps %xmm0, (%rdi)
; X64-AVX-NEXT: retq
  %tmp3 = load <2 x double>, <2 x double>* %A, align 16
  %tmp7 = insertelement <2 x double> undef, double %B, i32 0
  %tmp9 = shufflevector <2 x double> %tmp3, <2 x double> %tmp7, <2 x i32> < i32 2, i32 1 >

@@ -422,10 +415,10 @@ define void @test12() nounwind {
;
; AVX512-LABEL: test12:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovapd 0, %xmm0
; AVX512-NEXT: vmovaps 0, %xmm0
; AVX512-NEXT: vbroadcastss {{.*#+}} xmm1 = [1,1,1,1]
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
; AVX512-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vblendps {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3]
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX512-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1]
; AVX512-NEXT: vaddps %xmm0, %xmm1, %xmm0
; AVX512-NEXT: vmovaps %xmm0, 0

@@ -31,15 +31,10 @@ define <2 x double> @test_mm_blend_pd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE-NEXT: ret{{[l|q]}}
;
; AVX1-LABEL: test_mm_blend_pd:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX1-NEXT: ret{{[l|q]}}
;
; AVX512-LABEL: test_mm_blend_pd:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}}
; AVX-LABEL: test_mm_blend_pd:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: ret{{[l|q]}}
  %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %res
}

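(Aside, not part of the commit: how the blend immediate relates to the shuffle mask, using a hypothetical function name. Bit i of a blendps/blendpd immediate picks lane i from the second source, so mask <0, 3> is vblendpd $2, or equivalently vblendps $12 on the four f32 lanes -- which is why the merged AVX checks above show `xmm0 = xmm0[0,1],xmm1[2,3]`.)

define <2 x double> @sketch_blend_pd(<2 x double> %a0, <2 x double> %a1) {
  ; lane 0 from %a0 (index 0), lane 1 from %a1 (index 3)
  %res = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %res
}
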
@@ -16,17 +16,11 @@ define <2 x double> @test_x86_sse41_blendpd(<2 x double> %a0, <2 x double> %a1) {
; SSE-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: test_x86_sse41_blendpd:
; AVX1: ## %bb.0:
; AVX1-NEXT: vblendps $3, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
; AVX1-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: test_x86_sse41_blendpd:
; AVX512: ## %bb.0:
; AVX512-NEXT: vmovsd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf3,0x10,0xc0]
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm1[1]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
; AVX-LABEL: test_x86_sse41_blendpd:
; AVX: ## %bb.0:
; AVX-NEXT: vblendps $3, %xmm0, %xmm1, %xmm0 ## encoding: [0xc4,0xe3,0x71,0x0c,0xc0,0x03]
; AVX-NEXT: ## xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %res = call <2 x double> @llvm.x86.sse41.blendpd(<2 x double> %a0, <2 x double> %a1, i32 6) ; <<2 x double>> [#uses=1]
  ret <2 x double> %res
}

@@ -361,7 +361,7 @@ define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
; X86-AVX512: ## %bb.0:
; X86-AVX512-NEXT: vmovss {{[0-9]+}}(%esp), %xmm1 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0x4c,0x24,0x04]
; X86-AVX512-NEXT: ## xmm1 = mem[0],zero,zero,zero
; X86-AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; X86-AVX512-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X86-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X86-AVX512-NEXT: retl ## encoding: [0xc3]
;

@@ -371,17 +371,11 @@ define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
; X64-SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-SSE-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX1-LABEL: blendps_not_insertps_1:
; X64-AVX1: ## %bb.0:
; X64-AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X64-AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-AVX1-NEXT: retq ## encoding: [0xc3]
;
; X64-AVX512-LABEL: blendps_not_insertps_1:
; X64-AVX512: ## %bb.0:
; X64-AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; X64-AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-AVX512-NEXT: retq ## encoding: [0xc3]
; X64-AVX-LABEL: blendps_not_insertps_1:
; X64-AVX: ## %bb.0:
; X64-AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; X64-AVX-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; X64-AVX-NEXT: retq ## encoding: [0xc3]
  %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
  ret <4 x float> %tmp1
}

@@ -444,17 +438,11 @@ define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nounwind {
; SSE-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; SSE-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX1-LABEL: blendps_not_insertps_2:
; AVX1: ## %bb.0:
; AVX1-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX1-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
;
; AVX512-LABEL: blendps_not_insertps_2:
; AVX512: ## %bb.0:
; AVX512-NEXT: vmovss %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfa,0x10,0xc1]
; AVX512-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
; AVX-LABEL: blendps_not_insertps_2:
; AVX: ## %bb.0:
; AVX-NEXT: vblendps $1, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x01]
; AVX-NEXT: ## xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: ret{{[l|q]}} ## encoding: [0xc3]
  %tmp2 = extractelement <4 x float> %t2, i32 0
  %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
  ret <4 x float> %tmp1

@@ -1231,8 +1219,8 @@ define <4 x i32> @i32_shuf_X00A(<4 x i32> %x, <4 x i32> %a) {
;
; AVX512-LABEL: i32_shuf_X00A:
; AVX512: ## %bb.0:
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe8,0x57,0xd2]
; AVX512-NEXT: vmovss %xmm0, %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xea,0x10,0xc0]
; AVX512-NEXT: vxorps %xmm2, %xmm2, %xmm2 ## encoding: [0xc5,0xe8,0x57,0xd2]
; AVX512-NEXT: vblendps $1, %xmm0, %xmm2, %xmm0 ## encoding: [0xc4,0xe3,0x69,0x0c,0xc0,0x01]
; AVX512-NEXT: ## xmm0 = xmm0[0],xmm2[1,2,3]
; AVX512-NEXT: vbroadcastss %xmm1, %xmm1 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x18,0xc9]
; AVX512-NEXT: vblendps $8, %xmm1, %xmm0, %xmm0 ## encoding: [0xc4,0xe3,0x79,0x0c,0xc1,0x08]

@@ -63,7 +63,8 @@ define i16 @test1(float %f) nounwind {
; X32_AVX512-NEXT: vaddss LCPI0_0, %xmm0, %xmm0
; X32_AVX512-NEXT: vmulss LCPI0_1, %xmm0, %xmm0
; X32_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32_AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32_AVX512-NEXT: vminss LCPI0_2, %xmm0, %xmm0
; X32_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X32_AVX512-NEXT: vcvttss2si %xmm0, %eax

@@ -75,7 +76,8 @@ define i16 @test1(float %f) nounwind {
; X64_AVX512-NEXT: vaddss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmulss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64_AVX512-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64_AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64_AVX512-NEXT: vminss {{.*}}(%rip), %xmm0, %xmm0
; X64_AVX512-NEXT: vmaxss %xmm1, %xmm0, %xmm0
; X64_AVX512-NEXT: vcvttss2si %xmm0, %eax

@@ -243,20 +243,10 @@ define <2 x double> @shuffle_v2f64_03(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_03:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_03:
; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_03:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
; AVX-LABEL: shuffle_v2f64_03:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: retq
  %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %shuffle
}

@@ -281,20 +271,10 @@ define <2 x double> @shuffle_v2f64_21(<2 x double> %a, <2 x double> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: shuffle_v2f64_21:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: shuffle_v2f64_21:
; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v2f64_21:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1]
; AVX512VL-NEXT: retq
; AVX-LABEL: shuffle_v2f64_21:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3]
; AVX-NEXT: retq
  %shuffle = shufflevector <2 x double> %a, <2 x double> %b, <2 x i32> <i32 2, i32 1>
  ret <2 x double> %shuffle
}

@@ -1198,20 +1178,10 @@ define <2 x double> @insert_reg_lo_v2f64(double %a, <2 x double> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: insert_reg_lo_v2f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: insert_reg_lo_v2f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_lo_v2f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
; AVX-LABEL: insert_reg_lo_v2f64:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: retq
  %v = insertelement <2 x double> undef, double %a, i32 0
  %shuffle = shufflevector <2 x double> %v, <2 x double> %b, <2 x i32> <i32 0, i32 3>
  ret <2 x double> %shuffle

@@ -631,17 +631,11 @@ define <4 x float> @shuffle_v4f32_4zzz(<4 x float> %a) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4f32_4zzz:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4f32_4zzz:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: retq
; AVX-LABEL: shuffle_v4f32_4zzz:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
  %shuffle = shufflevector <4 x float> zeroinitializer, <4 x float> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x float> %shuffle
}

@@ -1189,17 +1183,11 @@ define <4 x i32> @shuffle_v4i32_4zzz(<4 x i32> %a) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_v4i32_4zzz:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_v4i32_4zzz:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: retq
; AVX-LABEL: shuffle_v4i32_4zzz:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
  %shuffle = shufflevector <4 x i32> zeroinitializer, <4 x i32> %a, <4 x i32> <i32 4, i32 1, i32 2, i32 3>
  ret <4 x i32> %shuffle
}

@@ -2100,17 +2088,11 @@ define <4 x float> @insert_reg_and_zero_v4f32(float %a) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: insert_reg_and_zero_v4f32:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512VL-NEXT: retq
; AVX-LABEL: insert_reg_and_zero_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
  %v = insertelement <4 x float> undef, float %a, i32 0
  %shuffle = shufflevector <4 x float> %v, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %shuffle

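(Aside, not part of the commit: the insert-and-zero (X86vzmovl) shape shared by the last few hunks, sketched with a hypothetical name. AVX512 used to materialize zero and vmovss over it; it now emits the same vxorps + vblendps pair as the AVX1/AVX2 paths.)

define <4 x float> @sketch_insert_and_zero(float %a) {
  ; put %a in lane 0, take lanes 1-3 from the zero vector
  %v = insertelement <4 x float> undef, float %a, i32 0
  %r = shufflevector <4 x float> %v, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x float> %r
}
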
@@ -2267,15 +2249,10 @@ define <4 x float> @insert_reg_lo_v4f32(double %a, <4 x float> %b) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE41-NEXT: retq
;
; AVX1OR2-LABEL: insert_reg_lo_v4f32:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_lo_v4f32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
; AVX-LABEL: insert_reg_lo_v4f32:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: retq
  %a.cast = bitcast double %a to <2 x float>
  %v = shufflevector <2 x float> %a.cast, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
  %shuffle = shufflevector <4 x float> %v, <4 x float> %b, <4 x i32> <i32 0, i32 1, i32 6, i32 7>

@@ -1372,19 +1372,12 @@ define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
}

define <4 x double> @insert_reg_and_zero_v4f64(double %a) {
; AVX1OR2-LABEL: insert_reg_and_zero_v4f64:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX1OR2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1OR2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: insert_reg_and_zero_v4f64:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512VL-NEXT: retq
; ALL-LABEL: insert_reg_and_zero_v4f64:
; ALL: # %bb.0:
; ALL-NEXT: # kill: def $xmm0 killed $xmm0 def $ymm0
; ALL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; ALL-NEXT: retq
  %v = insertelement <4 x double> undef, double %a, i32 0
  %shuffle = shufflevector <4 x double> %v, <4 x double> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 6, i32 7>
  ret <4 x double> %shuffle

@@ -520,29 +520,17 @@ define <4 x i64> @combine_pshufb_as_zext128(<32 x i8> %a0) {
}

define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; X32-AVX2-NEXT: retl
; X32-LABEL: combine_pshufb_as_vzmovl_64:
; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; X32-NEXT: retl
;
; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X32-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X32-AVX512-NEXT: retl
;
; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_64:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_64:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; X64-AVX512-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; X64-AVX512-NEXT: retq
; X64-LABEL: combine_pshufb_as_vzmovl_64:
; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7]
; X64-NEXT: retq
  %1 = bitcast <4 x double> %a0 to <32 x i8>
  %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <32 x i8> %2 to <4 x double>

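(Aside, not part of the commit: the 256-bit pshufb above in sketch form, with a hypothetical name. AVX2 pshufb shuffles within each 128-bit lane, and a mask byte of -1 writes zero, so keeping bytes 0-7 and zeroing everything else is exactly "move low f64 and clear high bits"; AVX512 now lowers it with vxorps + vblendps like the AVX2 path.)

declare <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8>, <32 x i8>)

define <4 x double> @sketch_vzmovl_64(<4 x double> %a0) {
  %b = bitcast <4 x double> %a0 to <32 x i8>
  ; bytes 0-7 stay, all other mask bytes are -1 and therefore zeroed
  %p = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %b, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %r = bitcast <32 x i8> %p to <4 x double>
  ret <4 x double> %r
}
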
@@ -550,29 +538,17 @@ define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
}

define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
; X32-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
; X32-AVX2: # %bb.0:
; X32-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X32-AVX2-NEXT: retl
; X32-LABEL: combine_pshufb_as_vzmovl_32:
; X32: # %bb.0:
; X32-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X32-NEXT: retl
;
; X32-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
; X32-AVX512: # %bb.0:
; X32-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-AVX512-NEXT: retl
;
; X64-AVX2-LABEL: combine_pshufb_as_vzmovl_32:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X64-AVX2-NEXT: retq
;
; X64-AVX512-LABEL: combine_pshufb_as_vzmovl_32:
; X64-AVX512: # %bb.0:
; X64-AVX512-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-AVX512-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-AVX512-NEXT: retq
; X64-LABEL: combine_pshufb_as_vzmovl_32:
; X64: # %bb.0:
; X64-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
; X64-NEXT: retq
  %1 = bitcast <8 x float> %a0 to <32 x i8>
  %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <32 x i8> %2 to <8 x float>

@@ -52,20 +52,10 @@ define <2 x double> @combine_pshufb_as_movsd(<2 x double> %a0, <2 x double> %a1) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_pshufb_as_movsd:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufb_as_movsd:
; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_pshufb_as_movsd:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
; AVX512F-NEXT: retq
; AVX-LABEL: combine_pshufb_as_movsd:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3]
; AVX-NEXT: retq
  %1 = shufflevector <2 x double> %a0, <2 x double> %a1, <2 x i32> <i32 3, i32 0>
  %2 = bitcast <2 x double> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>)

@@ -84,20 +74,10 @@ define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_pshufb_as_movss:
; AVX1: # %bb.0:
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufb_as_movss:
; AVX2: # %bb.0:
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_pshufb_as_movss:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX512F-NEXT: retq
; AVX-LABEL: combine_pshufb_as_movss:
; AVX: # %bb.0:
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
; AVX-NEXT: retq
  %1 = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 3, i32 2, i32 1>
  %2 = bitcast <4 x float> %1 to <16 x i8>
  %3 = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %2, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 12, i8 13, i8 14, i8 15, i8 8, i8 9, i8 10, i8 11, i8 4, i8 5, i8 6, i8 7>)

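(Aside, not part of the commit: the movss recognition above, sketched with a hypothetical name. Shuffle combining sees through the bitcasts: after the pshufb, lane 0 holds the low element of %a1 and lanes 1-3 hold %a0's, i.e. a movss -- now matched by the vblendps form in the merged AVX checks.)

declare <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8>, <16 x i8>)

define <4 x float> @sketch_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
  %s = shufflevector <4 x float> %a0, <4 x float> %a1, <4 x i32> <i32 4, i32 3, i32 2, i32 1>
  %b = bitcast <4 x float> %s to <16 x i8>
  ; the byte mask undoes the rotation of lanes 1-3 and keeps %a1's low lane
  %p = tail call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %b, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 12, i8 13, i8 14, i8 15, i8 8, i8 9, i8 10, i8 11, i8 4, i8 5, i8 6, i8 7>)
  %r = bitcast <16 x i8> %p to <4 x float>
  ret <4 x float> %r
}
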
@@ -155,23 +135,11 @@ define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
; SSE41-NEXT: blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; SSE41-NEXT: retq
;
; AVX1-LABEL: combine_pshufb_as_vzmovl_32:
; AVX1: # %bb.0:
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_pshufb_as_vzmovl_32:
; AVX2: # %bb.0:
; AVX2-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT: retq
;
; AVX512F-LABEL: combine_pshufb_as_vzmovl_32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-NEXT: retq
; AVX-LABEL: combine_pshufb_as_vzmovl_32:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX-NEXT: retq
  %1 = bitcast <4 x float> %a0 to <16 x i8>
  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>)
  %3 = bitcast <16 x i8> %2 to <4 x float>