[X86][SSE] Fix domains for VZEXT_LOAD type instructions

Add the missing domain equivalences for movss, movsd, movd and movq zero-extending load instructions.

Differential Revision: https://reviews.llvm.org/D27684

llvm-svn: 289825
commit d7518896ff (parent 879a657680)
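For context, the ReplaceableInstrs tables changed below list, per row, the same operation encoded for each SSE execution domain (packed-single, packed-double, packed-integer). The X86 execution-domain pass consults these tables to rewrite an instruction into the domain of its neighbours and avoid domain-crossing penalties. The following is only a minimal sketch of that lookup, with made-up opcode values purely for illustration; the real tables and helpers live in X86InstrInfo.cpp and are more involved.

#include <cstdint>

// Each row holds the same operation for the three SSE domains:
// { packed-single, packed-double, packed-integer }.  The opcode values
// here are hypothetical placeholders, not real LLVM opcode numbers.
static const uint16_t ReplaceableInstrs[][3] = {
    { /*MOVSSrm*/ 0x100, /*MOVSSrm*/ 0x100, /*MOVDI2PDIrm*/ 0x200 },
    { /*MOVSDrm*/ 0x101, /*MOVSDrm*/ 0x101, /*MOVQI2PQIrm*/ 0x201 },
};

// Return the equivalent opcode in the requested domain (0 = packed-single,
// 1 = packed-double, 2 = packed-integer), or the original opcode if no
// row mentions it.
static uint16_t equivalentOpcode(uint16_t Opcode, unsigned Domain) {
  for (const auto &Row : ReplaceableInstrs)
    for (unsigned D = 0; D < 3; ++D)
      if (Row[D] == Opcode)
        return Row[Domain];
  return Opcode;
}

The commit adds rows for the scalar zero-extending loads, which previously had no entries and therefore could never be moved out of their original domain.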
@@ -8457,6 +8457,8 @@ static const uint16_t ReplaceableInstrs[][3] = {
   { X86::MOVUPSmr, X86::MOVUPDmr, X86::MOVDQUmr },
   { X86::MOVUPSrm, X86::MOVUPDrm, X86::MOVDQUrm },
   { X86::MOVLPSmr, X86::MOVLPDmr, X86::MOVPQI2QImr },
+  { X86::MOVSDrm, X86::MOVSDrm, X86::MOVQI2PQIrm },
+  { X86::MOVSSrm, X86::MOVSSrm, X86::MOVDI2PDIrm },
   { X86::MOVNTPSmr, X86::MOVNTPDmr, X86::MOVNTDQmr },
   { X86::ANDNPSrm, X86::ANDNPDrm, X86::PANDNrm },
   { X86::ANDNPSrr, X86::ANDNPDrr, X86::PANDNrr },

@@ -8473,6 +8475,8 @@ static const uint16_t ReplaceableInstrs[][3] = {
   { X86::VMOVUPSmr, X86::VMOVUPDmr, X86::VMOVDQUmr },
   { X86::VMOVUPSrm, X86::VMOVUPDrm, X86::VMOVDQUrm },
   { X86::VMOVLPSmr, X86::VMOVLPDmr, X86::VMOVPQI2QImr },
+  { X86::VMOVSDrm, X86::VMOVSDrm, X86::VMOVQI2PQIrm },
+  { X86::VMOVSSrm, X86::VMOVSSrm, X86::VMOVDI2PDIrm },
   { X86::VMOVNTPSmr, X86::VMOVNTPDmr, X86::VMOVNTDQmr },
   { X86::VANDNPSrm, X86::VANDNPDrm, X86::VPANDNrm },
   { X86::VANDNPSrr, X86::VANDNPDrr, X86::VPANDNrr },

@@ -8549,6 +8553,8 @@ static const uint16_t ReplaceableInstrsAVX512[][4] = {
   { X86::VMOVAPSZrr, X86::VMOVAPDZrr, X86::VMOVDQA64Zrr, X86::VMOVDQA32Zrr },
   { X86::VMOVUPSZmr, X86::VMOVUPDZmr, X86::VMOVDQU64Zmr, X86::VMOVDQU32Zmr },
   { X86::VMOVUPSZrm, X86::VMOVUPDZrm, X86::VMOVDQU64Zrm, X86::VMOVDQU32Zrm },
+  { X86::VMOVSDZrm, X86::VMOVSDZrm, X86::VMOVQI2PQIZrm, X86::VMOVQI2PQIZrm, },
+  { X86::VMOVSSZrm, X86::VMOVSSZrm, X86::VMOVDI2PDIZrm, X86::VMOVDI2PDIZrm, },
 };

 static const uint16_t ReplaceableInstrsAVX512DQ[][4] = {
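The remaining hunks are regenerated test checks. They change because the domain-fix pass may now flip these zero-extending loads into whichever domain their packed users live in. The following is only a rough illustration of that decision, under the simplifying assumption that the pass looks at one instruction's users (the real pass tracks domains across the whole dependency graph); chooseDomain is a hypothetical helper, not an LLVM API.

#include <vector>

enum Domain { AnyDomain, PackedSingle, PackedDouble, PackedInt };

// If every constrained user of a convertible load agrees on one packed
// domain, move the load into that domain; otherwise keep it where it is.
static Domain chooseDomain(Domain Current, const std::vector<Domain> &Users) {
  Domain Wanted = AnyDomain;
  for (Domain D : Users) {
    if (D == AnyDomain)
      continue;               // this user imposes no constraint
    if (Wanted == AnyDomain)
      Wanted = D;             // first constrained user sets the target
    else if (Wanted != D)
      return Current;         // users disagree: leave the load unchanged
  }
  return Wanted == AnyDomain ? Current : Wanted;
}

That is why, for example, a vmovsd feeding the integer-only vpslldq becomes vmovq below, while a vmovd feeding only float operations becomes vmovss.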
@@ -1,6 +1,6 @@
 ; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s

-; CHECK: xorpd {{.*}}{{LCPI0_0|__xmm@}}
+; CHECK: xorps {{.*}}{{LCPI0_0|__xmm@}}
 define void @casin({ double, double }* sret %agg.result, double %z.0, double %z.1) nounwind {
 entry:
 %memtmp = alloca { double, double }, align 8 ; <{ double, double }*> [#uses=3]
@@ -18,8 +18,8 @@ define void @bad_cast() {
 define void @bad_insert(i32 %t) {
 ; CHECK-LABEL: bad_insert:
 ; CHECK: # BB#0:
-; CHECK-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; CHECK-NEXT: vmovdqa %ymm0, (%eax)
+; CHECK-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; CHECK-NEXT: vmovaps %ymm0, (%eax)
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retl
 %v2 = insertelement <8 x i32> zeroinitializer, i32 %t, i32 0
@@ -403,7 +403,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
 ; CHECK-LABEL: test_x86_sse2_storeu_pd:
 ; CHECK: ## BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
+; CHECK-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
 ; CHECK-NEXT: vpslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
 ; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0
 ; CHECK-NEXT: vmovupd %xmm0, (%eax)
@@ -16,7 +16,7 @@ define <8 x i16> @test2(<4 x i16>* %v) nounwind {
 ; CHECK-LABEL: test2:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
-; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; CHECK-NEXT: vmovq {{.*#+}} xmm0 = xmm0[0],zero
 ; CHECK-NEXT: retl
 %v9 = load <4 x i16>, <4 x i16> * %v, align 8
@@ -279,7 +279,7 @@ define <16 x i16> @broadcast_mem_v4i16_v16i16(<4 x i16>* %ptr) {
 ; X32-AVX2-LABEL: broadcast_mem_v4i16_v16i16:
 ; X32-AVX2: ## BB#0:
 ; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
+; X32-AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
 ; X32-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,4,5,6,7,6,7],zero,zero
 ; X32-AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
 ; X32-AVX2-NEXT: retl
@@ -31,7 +31,7 @@ define <2 x i64> @test3(i64 %x) {
 define <4 x i32> @test4(i32* %x) {
 ; CHECK-LABEL: test4:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x6e,0x07]
+; CHECK-NEXT: vmovss (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x10,0x07]
 ; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %y = load i32, i32* %x
@@ -89,7 +89,7 @@ define i64 @test9(<2 x i64> %x) {
 define <4 x i32> @test10(i32* %x) {
 ; CHECK-LABEL: test10:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x6e,0x07]
+; CHECK-NEXT: vmovss (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x10,0x07]
 ; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %y = load i32, i32* %x, align 4
@@ -140,7 +140,7 @@ define <4 x i32> @test14(i32 %x) {
 define <4 x i32> @test15(i32* %x) {
 ; CHECK-LABEL: test15:
 ; CHECK: ## BB#0:
-; CHECK-NEXT: vmovd (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7d,0x08,0x6e,0x07]
+; CHECK-NEXT: vmovss (%rdi), %xmm0 ## encoding: [0x62,0xf1,0x7e,0x08,0x10,0x07]
 ; CHECK-NEXT: ## xmm0 = mem[0],zero,zero,zero
 ; CHECK-NEXT: retq ## encoding: [0xc3]
 %y = load i32, i32* %x, align 4
@@ -231,7 +231,7 @@ define double @f7_double(double %x) {
 ; CHECK-LABEL: f7_double:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; CHECK-NEXT: andpd %xmm1, %xmm0
+; CHECK-NEXT: andps %xmm1, %xmm0
 ; CHECK-NEXT: retq
 ;
 %bc1 = bitcast double %x to i64
@@ -46,7 +46,7 @@ entry:
 ; X64-LABEL: TestFPExtF64_F128:
 ; X64: movsd vf64(%rip), %xmm0
 ; X64-NEXT: callq __extenddftf2
-; X64-NEXT: movapd %xmm0, vf128(%rip)
+; X64-NEXT: movaps %xmm0, vf128(%rip)
 ; X64: ret
 }

@@ -69,6 +69,7 @@ define void @store_i64_from_vector256(<16 x i16> %x, <16 x i16> %y, i64* %i) {
 define void @PR23476(<5 x i64> %in, i64* %out, i32 %index) {
 ; X32-LABEL: PR23476:
 ; X32: movsd {{.*#+}} xmm0 = mem[0],zero
+; X32: movsd {{.*#+}} xmm0 = mem[0],zero
 ; X32-NEXT: movsd %xmm0, (%eax)
 %ext = extractelement <5 x i64> %in, i32 %index
 store i64 %ext, i64* %out, align 8
@@ -1,27 +1,28 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=sse2,sse-unaligned-mem | FileCheck %s --check-prefix=SSE2
 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX

-; Although we have the ability to fold an unaligned load with AVX
+; Although we have the ability to fold an unaligned load with AVX
 ; and under special conditions with some SSE implementations, we
 ; can not fold the load under any circumstances in these test
 ; cases because they are not 16-byte loads. The load must be
 ; executed as a scalar ('movs*') with a zero extension to
-; 128-bits and then used in the packed logical ('andp*') op.
+; 128-bits and then used in the packed logical ('andp*') op.
 ; PR22371 - http://llvm.org/bugs/show_bug.cgi?id=22371

 define double @load_double_no_fold(double %x, double %y) {
 ; SSE2-LABEL: load_double_no_fold:
-; SSE2: BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: cmplesd %xmm0, %xmm1
 ; SSE2-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; SSE2-NEXT: andpd %xmm1, %xmm0
+; SSE2-NEXT: andps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; AVX-LABEL: load_double_no_fold:
-; AVX: BB#0:
+; AVX: # BB#0:
 ; AVX-NEXT: vcmplesd %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
-; AVX-NEXT: vandpd %xmm1, %xmm0, %xmm0
+; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
 ; AVX-NEXT: retq

 %cmp = fcmp oge double %x, %y
@@ -32,14 +33,14 @@ define double @load_double_no_fold(double %x, double %y) {

 define float @load_float_no_fold(float %x, float %y) {
 ; SSE2-LABEL: load_float_no_fold:
-; SSE2: BB#0:
+; SSE2: # BB#0:
 ; SSE2-NEXT: cmpless %xmm0, %xmm1
 ; SSE2-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
 ; SSE2-NEXT: andps %xmm1, %xmm0
 ; SSE2-NEXT: retq
 ;
 ; AVX-LABEL: load_float_no_fold:
-; AVX: BB#0:
+; AVX: # BB#0:
 ; AVX-NEXT: vcmpless %xmm0, %xmm1, %xmm0
 ; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
 ; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
@ -416,12 +416,12 @@ define <4 x i32> @merge_4i32_i32_23u5(i32* %ptr) nounwind uwtable noinline ssp {
|
|||
define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_4i32_i32_3zuu:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_4i32_i32_3zuu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_4i32_i32_3zuu:
|
||||
|
@ -436,7 +436,7 @@ define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
|
|||
; X32-SSE41-LABEL: merge_4i32_i32_3zuu:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
|
||||
%val0 = load i32, i32* %ptr0
|
||||
|
@ -448,12 +448,12 @@ define <4 x i32> @merge_4i32_i32_3zuu(i32* %ptr) nounwind uwtable noinline ssp {
|
|||
define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_4i32_i32_34uu:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_4i32_i32_34uu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_4i32_i32_34uu:
|
||||
|
@ -469,7 +469,7 @@ define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
|
|||
; X32-SSE41-LABEL: merge_4i32_i32_34uu:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 3
|
||||
%ptr1 = getelementptr inbounds i32, i32* %ptr, i64 4
|
||||
|
@ -483,12 +483,12 @@ define <4 x i32> @merge_4i32_i32_34uu(i32* %ptr) nounwind uwtable noinline ssp {
|
|||
define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_4i32_i32_45zz:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_4i32_i32_45zz:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_4i32_i32_45zz:
|
||||
|
@ -506,7 +506,7 @@ define <4 x i32> @merge_4i32_i32_45zz(i32* %ptr) nounwind uwtable noinline ssp {
|
|||
; X32-SSE41-LABEL: merge_4i32_i32_45zz:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 4
|
||||
%ptr1 = getelementptr inbounds i32, i32* %ptr, i64 5
|
||||
|
@ -599,12 +599,12 @@ define <8 x i16> @merge_8i16_i16_23u567u9(i16* %ptr) nounwind uwtable noinline s
|
|||
define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_8i16_i16_34uuuuuu:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_8i16_i16_34uuuuuu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_8i16_i16_34uuuuuu:
|
||||
|
@ -620,7 +620,7 @@ define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline s
|
|||
; X32-SSE41-LABEL: merge_8i16_i16_34uuuuuu:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 3
|
||||
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 4
|
||||
|
@ -634,12 +634,12 @@ define <8 x i16> @merge_8i16_i16_34uuuuuu(i16* %ptr) nounwind uwtable noinline s
|
|||
define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_8i16_i16_45u7zzzz:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_8i16_i16_45u7zzzz:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_8i16_i16_45u7zzzz:
|
||||
|
@ -667,7 +667,7 @@ define <8 x i16> @merge_8i16_i16_45u7zzzz(i16* %ptr) nounwind uwtable noinline s
|
|||
; X32-SSE41-LABEL: merge_8i16_i16_45u7zzzz:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
|
||||
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
|
||||
|
@ -811,12 +811,12 @@ define <16 x i8> @merge_16i8_i8_01u3456789ABCDuF(i8* %ptr) nounwind uwtable noin
|
|||
define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
|
||||
|
@ -839,7 +839,7 @@ define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noin
|
|||
; X32-SSE41-LABEL: merge_16i8_i8_01u3uuzzuuuuuzzz:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
|
||||
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
|
||||
|
@ -861,12 +861,12 @@ define <16 x i8> @merge_16i8_i8_01u3uuzzuuuuuzzz(i8* %ptr) nounwind uwtable noin
|
|||
define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noinline ssp {
|
||||
; SSE-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
|
||||
|
@ -905,7 +905,7 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
|
|||
; X32-SSE41-LABEL: merge_16i8_i8_0123uu67uuuuuzzz:
|
||||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-SSE41-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 0
|
||||
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 1
|
||||
|
@ -934,14 +934,14 @@ define <16 x i8> @merge_16i8_i8_0123uu67uuuuuzzz(i8* %ptr) nounwind uwtable noin
|
|||
define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
|
||||
; SSE-LABEL: merge_4i32_i32_combine:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movdqa %xmm0, (%rdi)
|
||||
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movaps %xmm0, (%rdi)
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: merge_4i32_i32_combine:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovdqa %xmm0, (%rdi)
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovaps %xmm0, (%rdi)
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-SSE1-LABEL: merge_4i32_i32_combine:
|
||||
|
@ -959,8 +959,8 @@ define void @merge_4i32_i32_combine(<4 x i32>* %dst, i32* %src) {
|
|||
; X32-SSE41: # BB#0:
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %ecx
|
||||
; X32-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: movdqa %xmm0, (%eax)
|
||||
; X32-SSE41-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-SSE41-NEXT: movaps %xmm0, (%eax)
|
||||
; X32-SSE41-NEXT: retl
|
||||
%1 = getelementptr i32, i32* %src, i32 0
|
||||
%2 = load i32, i32* %1
|
||||
|
|
|
@ -210,13 +210,13 @@ define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
|
|||
define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX-LABEL: merge_4i64_i64_1zzu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-AVX-LABEL: merge_4i64_i64_1zzu:
|
||||
; X32-AVX: # BB#0:
|
||||
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
|
||||
%val0 = load i64, i64* %ptr0
|
||||
|
@ -385,32 +385,18 @@ define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline
|
|||
}
|
||||
|
||||
define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX1-LABEL: merge_8i32_i32_56zz9uzz:
|
||||
; AVX1: # BB#0:
|
||||
; AVX1-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX1-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
|
||||
; AVX1-NEXT: retq
|
||||
;
|
||||
; AVX2-LABEL: merge_8i32_i32_56zz9uzz:
|
||||
; AVX2: # BB#0:
|
||||
; AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX2-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
|
||||
; AVX2-NEXT: retq
|
||||
;
|
||||
; AVX512F-LABEL: merge_8i32_i32_56zz9uzz:
|
||||
; AVX512F: # BB#0:
|
||||
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: retq
|
||||
; AVX-LABEL: merge_8i32_i32_56zz9uzz:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-AVX-LABEL: merge_8i32_i32_56zz9uzz:
|
||||
; X32-AVX: # BB#0:
|
||||
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
|
||||
; X32-AVX-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 5
|
||||
|
@ -473,13 +459,13 @@ define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline s
|
|||
define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
|
||||
; X32-AVX: # BB#0:
|
||||
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 8
|
||||
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 9
|
||||
|
@ -497,13 +483,13 @@ define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable
|
|||
define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
|
||||
; X32-AVX: # BB#0:
|
||||
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
|
||||
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
|
||||
|
@ -583,13 +569,13 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable
|
|||
define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
|
||||
; X32-AVX: # BB#0:
|
||||
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 4
|
||||
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 5
|
||||
|
@ -606,13 +592,13 @@ define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounw
|
|||
define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
;
|
||||
; X32-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
|
||||
; X32-AVX: # BB#0:
|
||||
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 2
|
||||
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 3
|
||||
|
|
|
@ -372,13 +372,13 @@ define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwta
|
|||
define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable noinline ssp {
|
||||
; ALL-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
|
||||
; ALL: # BB#0:
|
||||
; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: retq
|
||||
;
|
||||
; X32-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
|
||||
; X32-AVX512F: # BB#0:
|
||||
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
|
||||
%ptr1 = getelementptr inbounds i32, i32* %ptr, i64 2
|
||||
|
@ -486,19 +486,19 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
|
|||
define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
|
||||
; AVX512F: # BB#0:
|
||||
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: retq
|
||||
;
|
||||
; AVX512BW-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
|
||||
; AVX512BW: # BB#0:
|
||||
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512BW-NEXT: retq
|
||||
;
|
||||
; X32-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
|
||||
; X32-AVX512F: # BB#0:
|
||||
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; X32-AVX512F-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 1
|
||||
|
@ -518,13 +518,13 @@ define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) n
|
|||
define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; ALL-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
|
||||
; ALL: # BB#0:
|
||||
; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: retq
|
||||
;
|
||||
; X32-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
|
||||
; X32-AVX512F: # BB#0:
|
||||
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
|
||||
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
|
||||
|
@ -541,19 +541,19 @@ define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) n
|
|||
define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
|
||||
; AVX512F: # BB#0:
|
||||
; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: retq
|
||||
;
|
||||
; AVX512BW-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
|
||||
; AVX512BW: # BB#0:
|
||||
; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512BW-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512BW-NEXT: retq
|
||||
;
|
||||
; X32-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
|
||||
; X32-AVX512F: # BB#0:
|
||||
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; X32-AVX512F-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 2
|
||||
|
@ -573,19 +573,19 @@ define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) n
|
|||
define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
|
||||
; AVX512F: # BB#0:
|
||||
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: retq
|
||||
;
|
||||
; AVX512BW-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
|
||||
; AVX512BW: # BB#0:
|
||||
; AVX512BW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512BW-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512BW-NEXT: retq
|
||||
;
|
||||
; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
|
||||
; X32-AVX512F: # BB#0:
|
||||
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; X32-AVX512F-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
|
||||
|
@ -611,19 +611,19 @@ define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
|
|||
define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz(i8* %ptr) nounwind uwtable noinline ssp {
|
||||
; AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
|
||||
; AVX512F: # BB#0:
|
||||
; AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: retq
|
||||
;
|
||||
; AVX512BW-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
|
||||
; AVX512BW: # BB#0:
|
||||
; AVX512BW-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512BW-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX512BW-NEXT: retq
|
||||
;
|
||||
; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
|
||||
; X32-AVX512F: # BB#0:
|
||||
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-AVX512F-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; X32-AVX512F-NEXT: retl
|
||||
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
|
||||
|
|
|
@ -10,7 +10,7 @@ define void @t3() nounwind {
|
|||
; X86-64-LABEL: t3:
|
||||
; X86-64: ## BB#0:
|
||||
; X86-64-NEXT: movq _g_v8qi@{{.*}}(%rip), %rax
|
||||
; X86-64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X86-64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X86-64-NEXT: movb $1, %al
|
||||
; X86-64-NEXT: jmp _pass_v8qi ## TAILCALL
|
||||
%tmp3 = load <8 x i8>, <8 x i8>* @g_v8qi, align 8
|
||||
|
|
|
@ -85,14 +85,14 @@ entry:
|
|||
define void @test_vector_creation() nounwind {
|
||||
; SSE-LABEL: test_vector_creation:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: pslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
|
||||
; SSE-NEXT: movdqa %xmm0, (%rax)
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: test_vector_creation:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vpslldq {{.*#+}} xmm0 = zero,zero,zero,zero,zero,zero,zero,zero,xmm0[0,1,2,3,4,5,6,7]
|
||||
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
|
||||
; AVX-NEXT: vmovaps %ymm0, (%rax)
|
||||
|
|
|
@ -41,8 +41,8 @@ declare i32 @printf(...)
|
|||
define double @PR22371(double %x) {
|
||||
; CHECK-LABEL: PR22371:
|
||||
; CHECK: movsd 16(%esp), %xmm0
|
||||
; CHECK-NEXT: andpd LCPI1_0, %xmm0
|
||||
; CHECK-NEXT: movlpd %xmm0, (%esp)
|
||||
; CHECK-NEXT: andps LCPI1_0, %xmm0
|
||||
; CHECK-NEXT: movlps %xmm0, (%esp)
|
||||
%call = tail call double @fabs(double %x) #0
|
||||
ret double %call
|
||||
}
|
||||
|
|
|
@ -74,14 +74,14 @@ define x86_fp80 @s32_to_x(i32 %a) nounwind {
|
|||
}
|
||||
|
||||
; CHECK-LABEL: u64_to_f
|
||||
; AVX512_32: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512_32: vmovq %xmm0, {{[0-9]+}}(%esp)
|
||||
; AVX512_32: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512_32: vmovlps %xmm0, {{[0-9]+}}(%esp)
|
||||
; AVX512_32: fildll
|
||||
|
||||
; AVX512_64: vcvtusi2ssq
|
||||
|
||||
; SSE2_32: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE2_32: movq %xmm0, {{[0-9]+}}(%esp)
|
||||
; SSE2_32: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE2_32: movlps %xmm0, {{[0-9]+}}(%esp)
|
||||
; SSE2_32: fildll
|
||||
|
||||
; SSE2_64: cvtsi2ssq
|
||||
|
|
|
@ -94,11 +94,11 @@ define double @int2(double %a, float %b, float %c) nounwind {
|
|||
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: addss 20(%ebp), %xmm0
|
||||
; X32-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
|
||||
; X32-NEXT: andpd {{\.LCPI.*}}, %xmm1
|
||||
; X32-NEXT: andps {{\.LCPI.*}}, %xmm1
|
||||
; X32-NEXT: cvtss2sd %xmm0, %xmm0
|
||||
; X32-NEXT: andpd {{\.LCPI.*}}, %xmm0
|
||||
; X32-NEXT: orpd %xmm1, %xmm0
|
||||
; X32-NEXT: movlpd %xmm0, (%esp)
|
||||
; X32-NEXT: andps {{\.LCPI.*}}, %xmm0
|
||||
; X32-NEXT: orps %xmm1, %xmm0
|
||||
; X32-NEXT: movlps %xmm0, (%esp)
|
||||
; X32-NEXT: fldl (%esp)
|
||||
; X32-NEXT: movl %ebp, %esp
|
||||
; X32-NEXT: popl %ebp
|
||||
|
|
|
@ -779,11 +779,11 @@ define double @oge_y(double %x) {
|
|||
; STRICT-LABEL: oge_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm2
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm2
|
||||
; STRICT-NEXT: cmplesd %xmm0, %xmm2
|
||||
; STRICT-NEXT: andpd %xmm2, %xmm0
|
||||
; STRICT-NEXT: andnpd %xmm1, %xmm2
|
||||
; STRICT-NEXT: orpd %xmm2, %xmm0
|
||||
; STRICT-NEXT: andps %xmm2, %xmm0
|
||||
; STRICT-NEXT: andnps %xmm1, %xmm2
|
||||
; STRICT-NEXT: orps %xmm2, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; RELAX-LABEL: oge_y:
|
||||
|
@ -800,12 +800,12 @@ define double @ole_y(double %x) {
|
|||
; STRICT-LABEL: ole_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm0, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm0, %xmm1
|
||||
; STRICT-NEXT: cmplesd %xmm2, %xmm1
|
||||
; STRICT-NEXT: andpd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andnpd %xmm2, %xmm1
|
||||
; STRICT-NEXT: orpd %xmm0, %xmm1
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andps %xmm1, %xmm0
|
||||
; STRICT-NEXT: andnps %xmm2, %xmm1
|
||||
; STRICT-NEXT: orps %xmm0, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; RELAX-LABEL: ole_y:
|
||||
|
@ -822,12 +822,12 @@ define double @oge_inverse_y(double %x) {
|
|||
; STRICT-LABEL: oge_inverse_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm2, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm2, %xmm1
|
||||
; STRICT-NEXT: cmplesd %xmm0, %xmm1
|
||||
; STRICT-NEXT: andpd %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnpd %xmm0, %xmm1
|
||||
; STRICT-NEXT: orpd %xmm2, %xmm1
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andps %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnps %xmm0, %xmm1
|
||||
; STRICT-NEXT: orps %xmm2, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; UNSAFE-LABEL: oge_inverse_y:
|
||||
|
@ -851,12 +851,12 @@ define double @ole_inverse_y(double %x) {
|
|||
; STRICT-LABEL: ole_inverse_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm0, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm0, %xmm1
|
||||
; STRICT-NEXT: cmplesd %xmm2, %xmm1
|
||||
; STRICT-NEXT: andpd %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnpd %xmm0, %xmm1
|
||||
; STRICT-NEXT: orpd %xmm2, %xmm1
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andps %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnps %xmm0, %xmm1
|
||||
; STRICT-NEXT: orps %xmm2, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; UNSAFE-LABEL: ole_inverse_y:
|
||||
|
@ -880,12 +880,12 @@ define double @ugt_y(double %x) {
|
|||
; STRICT-LABEL: ugt_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm0, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm0, %xmm1
|
||||
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
|
||||
; STRICT-NEXT: andpd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andnpd %xmm2, %xmm1
|
||||
; STRICT-NEXT: orpd %xmm0, %xmm1
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andps %xmm1, %xmm0
|
||||
; STRICT-NEXT: andnps %xmm2, %xmm1
|
||||
; STRICT-NEXT: orps %xmm0, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; RELAX-LABEL: ugt_y:
|
||||
|
@ -902,11 +902,11 @@ define double @ult_y(double %x) {
|
|||
; STRICT-LABEL: ult_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm2
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm2
|
||||
; STRICT-NEXT: cmpnlesd %xmm0, %xmm2
|
||||
; STRICT-NEXT: andpd %xmm2, %xmm0
|
||||
; STRICT-NEXT: andnpd %xmm1, %xmm2
|
||||
; STRICT-NEXT: orpd %xmm2, %xmm0
|
||||
; STRICT-NEXT: andps %xmm2, %xmm0
|
||||
; STRICT-NEXT: andnps %xmm1, %xmm2
|
||||
; STRICT-NEXT: orps %xmm2, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; RELAX-LABEL: ult_y:
|
||||
|
@ -923,12 +923,12 @@ define double @ugt_inverse_y(double %x) {
|
|||
; STRICT-LABEL: ugt_inverse_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm0, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm0, %xmm1
|
||||
; STRICT-NEXT: cmpnlesd %xmm2, %xmm1
|
||||
; STRICT-NEXT: andpd %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnpd %xmm0, %xmm1
|
||||
; STRICT-NEXT: orpd %xmm2, %xmm1
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andps %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnps %xmm0, %xmm1
|
||||
; STRICT-NEXT: orps %xmm2, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; UNSAFE-LABEL: ugt_inverse_y:
|
||||
|
@ -952,12 +952,12 @@ define double @ult_inverse_y(double %x) {
|
|||
; STRICT-LABEL: ult_inverse_y:
|
||||
; STRICT: # BB#0:
|
||||
; STRICT-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
|
||||
; STRICT-NEXT: movapd %xmm2, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm2, %xmm1
|
||||
; STRICT-NEXT: cmpnlesd %xmm0, %xmm1
|
||||
; STRICT-NEXT: andpd %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnpd %xmm0, %xmm1
|
||||
; STRICT-NEXT: orpd %xmm2, %xmm1
|
||||
; STRICT-NEXT: movapd %xmm1, %xmm0
|
||||
; STRICT-NEXT: andps %xmm1, %xmm2
|
||||
; STRICT-NEXT: andnps %xmm0, %xmm1
|
||||
; STRICT-NEXT: orps %xmm2, %xmm1
|
||||
; STRICT-NEXT: movaps %xmm1, %xmm0
|
||||
; STRICT-NEXT: retq
|
||||
;
|
||||
; UNSAFE-LABEL: ult_inverse_y:
|
||||
|
|
|
@ -55,7 +55,7 @@ declare i64 @llvm.x86.sse2.cvttsd2si64(<2 x double>) nounwind readnone
|
|||
define <2 x i64> @test_mm_loadu_si64(i64* %a0) nounwind {
|
||||
; X64-LABEL: test_mm_loadu_si64:
|
||||
; X64: # BB#0:
|
||||
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: retq
|
||||
%ld = load i64, i64* %a0, align 1
|
||||
%res0 = insertelement <2 x i64> undef, i64 %ld, i32 0
|
||||
|
|
|
@ -1275,7 +1275,7 @@ define <2 x double> @test_mm_cvtsi32_sd(<2 x double> %a0, i32 %a1) nounwind {
|
|||
define <2 x i64> @test_mm_cvtsi32_si128(i32 %a0) nounwind {
|
||||
; X32-LABEL: test_mm_cvtsi32_si128:
|
||||
; X32: # BB#0:
|
||||
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
; X64-LABEL: test_mm_cvtsi32_si128:
|
||||
|
@ -1523,12 +1523,12 @@ define <2 x i64> @test_mm_loadl_epi64(<2 x i64> %a0, <2 x i64>* %a1) nounwind {
|
|||
; X32-LABEL: test_mm_loadl_epi64:
|
||||
; X32: # BB#0:
|
||||
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
; X64-LABEL: test_mm_loadl_epi64:
|
||||
; X64: # BB#0:
|
||||
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: retq
|
||||
%bc = bitcast <2 x i64>* %a1 to i64*
|
||||
%ld = load i64, i64* %bc, align 1
|
||||
|
@ -2326,7 +2326,7 @@ define <2 x double> @test_mm_set_pd(double %a0, double %a1) nounwind {
|
|||
define <2 x double> @test_mm_set_sd(double %a0) nounwind {
|
||||
; X32-LABEL: test_mm_set_sd:
|
||||
; X32: # BB#0:
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = xmm0[0],zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
|
|
|
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+sse2 | FileCheck %s

 define <2 x i64> @test_x86_sse2_psll_dq_bs(<2 x i64> %a0) {
@ -98,7 +98,7 @@ define void @test_x86_sse2_storeu_pd(i8* %a0, <2 x double> %a1) {
|
|||
; CHECK-LABEL: test_x86_sse2_storeu_pd:
|
||||
; CHECK: ## BB#0:
|
||||
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; CHECK-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
|
||||
; CHECK-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
|
||||
; CHECK-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4,5,6,7]
|
||||
; CHECK-NEXT: addpd %xmm0, %xmm1
|
||||
; CHECK-NEXT: movupd %xmm1, (%eax)
|
||||
|
|
|
@ -76,7 +76,7 @@ define <4 x i32> @test5(i8** %ptr) nounwind {
|
|||
; CHECK: ## BB#0:
|
||||
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; CHECK-NEXT: movl (%eax), %eax
|
||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; CHECK-NEXT: pxor %xmm0, %xmm0
|
||||
; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
||||
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
|
||||
|
|
|
@ -13,8 +13,8 @@ define float @test(i64 %a) nounwind {
|
|||
; X86-NEXT: movl %esp, %ebp
|
||||
; X86-NEXT: andl $-8, %esp
|
||||
; X86-NEXT: subl $16, %esp
|
||||
; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X86-NEXT: movq %xmm0, {{[0-9]+}}(%esp)
|
||||
; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X86-NEXT: movlps %xmm0, {{[0-9]+}}(%esp)
|
||||
; X86-NEXT: xorl %eax, %eax
|
||||
; X86-NEXT: cmpl $0, 12(%ebp)
|
||||
; X86-NEXT: setns %al
|
||||
|
|
|
@ -7,8 +7,8 @@ define float @test1(i32 %x) nounwind readnone {
|
|||
; CHECK: # BB#0: # %entry
|
||||
; CHECK-NEXT: pushl %eax
|
||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; CHECK-NEXT: por %xmm0, %xmm1
|
||||
; CHECK-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; CHECK-NEXT: orpd %xmm0, %xmm1
|
||||
; CHECK-NEXT: subsd %xmm0, %xmm1
|
||||
; CHECK-NEXT: xorps %xmm0, %xmm0
|
||||
; CHECK-NEXT: cvtsd2ss %xmm1, %xmm0
|
||||
|
|
|
@ -126,7 +126,7 @@ define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
|
|||
;
|
||||
; X64-LABEL: legal_vzmovl_2i32_8i32:
|
||||
; X64: # BB#0:
|
||||
; X64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
|
||||
; X64-NEXT: vmovaps %ymm0, (%rsi)
|
||||
|
@ -178,7 +178,7 @@ define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
|
|||
;
|
||||
; X64-LABEL: legal_vzmovl_2f32_8f32:
|
||||
; X64: # BB#0:
|
||||
; X64-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: vxorps %ymm1, %ymm1, %ymm1
|
||||
; X64-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
|
||||
; X64-NEXT: vmovaps %ymm0, (%rsi)
|
||||
|
|
|
@ -16,7 +16,7 @@ define i32 @test0(<1 x i64>* %v4) nounwind {
|
|||
; X32-NEXT: movl %ecx, (%esp)
|
||||
; X32-NEXT: pshufw $238, (%esp), %mm0 # mm0 = mem[2,3,2,3]
|
||||
; X32-NEXT: movq %mm0, {{[0-9]+}}(%esp)
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
|
||||
; X32-NEXT: movd %xmm0, %eax
|
||||
; X32-NEXT: addl $32, %eax
|
||||
|
@ -55,7 +55,7 @@ define i32 @test1(i32* nocapture readonly %ptr) nounwind {
|
|||
; X32-NEXT: movd (%eax), %mm0
|
||||
; X32-NEXT: pshufw $232, %mm0, %mm0 # mm0 = mm0[0,2,2,3]
|
||||
; X32-NEXT: movq %mm0, (%esp)
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
|
||||
; X32-NEXT: movd %xmm0, %eax
|
||||
; X32-NEXT: emms
|
||||
|
@ -98,7 +98,7 @@ define i32 @test2(i32* nocapture readonly %ptr) nounwind {
|
|||
; X32-NEXT: movl 8(%ebp), %eax
|
||||
; X32-NEXT: pshufw $232, (%eax), %mm0 # mm0 = mem[0,2,2,3]
|
||||
; X32-NEXT: movq %mm0, (%esp)
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
|
||||
; X32-NEXT: movd %xmm0, %eax
|
||||
; X32-NEXT: emms
|
||||
|
@ -149,7 +149,7 @@ define i32 @test4(x86_mmx %a) nounwind {
|
|||
; X32-NEXT: andl $-8, %esp
|
||||
; X32-NEXT: subl $8, %esp
|
||||
; X32-NEXT: movq %mm0, (%esp)
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,0,1]
|
||||
; X32-NEXT: movd %xmm0, %eax
|
||||
; X32-NEXT: movl %ebp, %esp
|
||||
|
|
|
@ -8,12 +8,12 @@ define <2 x i64> @foo1(i64* %y) nounwind {
|
|||
; X32-LABEL: foo1:
|
||||
; X32: # BB#0: # %entry
|
||||
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
; X64-LABEL: foo1:
|
||||
; X64: # BB#0: # %entry
|
||||
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: retq
|
||||
entry:
|
||||
%tmp1 = load i64, i64* %y, align 8
|
||||
|
@ -27,12 +27,12 @@ define <4 x float> @foo2(i64* %p) nounwind {
|
|||
; X32-LABEL: foo2:
|
||||
; X32: # BB#0: # %entry
|
||||
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
; X64-LABEL: foo2:
|
||||
; X64: # BB#0: # %entry
|
||||
; X64-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X64-NEXT: retq
|
||||
entry:
|
||||
%load = load i64, i64* %p
|
||||
|
|
|
@ -23,7 +23,7 @@ define <4 x float> @t1(float %s, <4 x float> %tmp) nounwind {
|
|||
define <4 x i32> @t2(i32 %s, <4 x i32> %tmp) nounwind {
|
||||
; X32-LABEL: t2:
|
||||
; X32: # BB#0:
|
||||
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
|
||||
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
|
||||
; X32-NEXT: retl
|
||||
|
|
|
@ -5,10 +5,10 @@
|
|||
define <2 x i64> @t1(i64 %s, <2 x i64> %tmp) nounwind {
|
||||
; X32-LABEL: t1:
|
||||
; X32: # BB#0:
|
||||
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
|
||||
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
|
||||
; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[2,0]
|
||||
; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
|
||||
; X32-NEXT: retl
|
||||
|
|
|
@ -29,7 +29,7 @@ define x86_mmx @t0(i32 %A) nounwind {
|
|||
define <8 x i8> @t1(i8 zeroext %x) nounwind {
|
||||
; X32-LABEL: t1:
|
||||
; X32: ## BB#0:
|
||||
; X32-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
; X64-LABEL: t1:
|
||||
|
@ -65,7 +65,7 @@ define void @t3() {
|
|||
; X32: ## BB#0:
|
||||
; X32-NEXT: movl L_g0$non_lazy_ptr, %eax
|
||||
; X32-NEXT: movl L_g1$non_lazy_ptr, %ecx
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
||||
; X32-NEXT: movzwl (%eax), %eax
|
||||
; X32-NEXT: movd %eax, %xmm1
|
||||
|
|
|
@ -2976,7 +2976,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
|
|||
;
|
||||
; AVX512F-LABEL: uitofp_load_2i32_to_2f64:
|
||||
; AVX512F: # BB#0:
|
||||
; AVX512F-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512F-NEXT: vcvtudq2pd %ymm0, %zmm0
|
||||
; AVX512F-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
|
||||
; AVX512F-NEXT: retq
|
||||
|
@ -2990,7 +2990,7 @@ define <2 x double> @uitofp_load_2i32_to_2f64(<2 x i32> *%a) {
|
|||
;
|
||||
; AVX512DQ-LABEL: uitofp_load_2i32_to_2f64:
|
||||
; AVX512DQ: # BB#0:
|
||||
; AVX512DQ-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512DQ-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX512DQ-NEXT: vcvtudq2pd %ymm0, %zmm0
|
||||
; AVX512DQ-NEXT: # kill: %XMM0<def> %XMM0<kill> %ZMM0<kill>
|
||||
; AVX512DQ-NEXT: retq
|
||||
|
|
|
@ -16,7 +16,7 @@ define <4 x float> @test1(float %a) nounwind {
|
|||
define <2 x i64> @test(i32 %a) nounwind {
|
||||
; CHECK-LABEL: test:
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; CHECK-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; CHECK-NEXT: retl
|
||||
%tmp = insertelement <4 x i32> zeroinitializer, i32 %a, i32 0
|
||||
%tmp6 = insertelement <4 x i32> %tmp, i32 0, i32 1
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
define <2 x i64> @t1(i64 %x) nounwind {
|
||||
; X32-LABEL: t1:
|
||||
; X32: # BB#0:
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: retl
|
||||
;
|
||||
; X64-LABEL: t1:
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
define <4 x i32> @t(i32 %x, i32 %y) nounwind {
|
||||
; CHECK-LABEL: t:
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; CHECK-NEXT: retl
|
||||
%tmp1 = insertelement <4 x i32> zeroinitializer, i32 %x, i32 0
|
||||
%tmp2 = insertelement <4 x i32> %tmp1, i32 %y, i32 1
|
||||
|
|
|
@ -5,7 +5,7 @@ define <2 x i64> @t1(<2 x i64>* %ptr) nounwind {
|
|||
; CHECK-LABEL: t1:
|
||||
; CHECK: # BB#0:
|
||||
; CHECK-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; CHECK-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; CHECK-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; CHECK-NEXT: retl
|
||||
%tmp45 = bitcast <2 x i64>* %ptr to <2 x i32>*
|
||||
%tmp615 = load <2 x i32>, <2 x i32>* %tmp45
|
||||
|
|
|
@ -1001,12 +1001,12 @@ define <2 x i64> @insert_reg_and_zero_v2i64(i64 %a) {
|
|||
define <2 x i64> @insert_mem_and_zero_v2i64(i64* %ptr) {
|
||||
; SSE-LABEL: insert_mem_and_zero_v2i64:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: insert_mem_and_zero_v2i64:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
%a = load i64, i64* %ptr
|
||||
%v = insertelement <2 x i64> undef, i64 %a, i32 0
|
||||
|
|
|
@ -2055,12 +2055,12 @@ define <4 x i32> @insert_reg_and_zero_v4i32(i32 %a) {
|
|||
define <4 x i32> @insert_mem_and_zero_v4i32(i32* %ptr) {
|
||||
; SSE-LABEL: insert_mem_and_zero_v4i32:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: insert_mem_and_zero_v4i32:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
%a = load i32, i32* %ptr
|
||||
%v = insertelement <4 x i32> undef, i32 %a, i32 0
|
||||
|
|
|
@ -1187,7 +1187,7 @@ define <4 x i64> @insert_reg_and_zero_v4i64(i64 %a) {
|
|||
define <4 x i64> @insert_mem_and_zero_v4i64(i64* %ptr) {
|
||||
; ALL-LABEL: insert_mem_and_zero_v4i64:
|
||||
; ALL: # BB#0:
|
||||
; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: retq
|
||||
%a = load i64, i64* %ptr
|
||||
%v = insertelement <4 x i64> undef, i64 %a, i64 0
|
||||
|
|
|
@ -2434,7 +2434,7 @@ define <8 x i32> @shuffle_v8i32_12305674(<8 x i32> %a, <8 x i32> %b) {
|
|||
define <8x float> @concat_v2f32_1(<2 x float>* %tmp64, <2 x float>* %tmp65) {
|
||||
; ALL-LABEL: concat_v2f32_1:
|
||||
; ALL: # BB#0: # %entry
|
||||
; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
|
||||
; ALL-NEXT: retq
|
||||
entry:
|
||||
|
@ -2449,7 +2449,7 @@ entry:
|
|||
define <8x float> @concat_v2f32_2(<2 x float>* %tmp64, <2 x float>* %tmp65) {
|
||||
; ALL-LABEL: concat_v2f32_2:
|
||||
; ALL: # BB#0: # %entry
|
||||
; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
|
||||
; ALL-NEXT: retq
|
||||
entry:
|
||||
|
@ -2462,7 +2462,7 @@ entry:
|
|||
define <8x float> @concat_v2f32_3(<2 x float>* %tmp64, <2 x float>* %tmp65) {
|
||||
; ALL-LABEL: concat_v2f32_3:
|
||||
; ALL: # BB#0: # %entry
|
||||
; ALL-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; ALL-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
|
||||
; ALL-NEXT: retq
|
||||
entry:
|
||||
|
@ -2476,7 +2476,7 @@ entry:
|
|||
define <8 x i32> @insert_mem_and_zero_v8i32(i32* %ptr) {
|
||||
; ALL-LABEL: insert_mem_and_zero_v8i32:
|
||||
; ALL: # BB#0:
|
||||
; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; ALL-NEXT: retq
|
||||
%a = load i32, i32* %ptr
|
||||
%v = insertelement <8 x i32> undef, i32 %a, i32 0
|
||||
|
|
|
@ -301,7 +301,7 @@ define <16 x float> @shuffle_v16f32_00_01_10_10_04_05_14_14_08_09_18_18_0c_0d_1c
|
|||
define <16 x i32> @insert_mem_and_zero_v16i32(i32* %ptr) {
|
||||
; ALL-LABEL: insert_mem_and_zero_v16i32:
|
||||
; ALL: # BB#0:
|
||||
; ALL-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; ALL-NEXT: retq
|
||||
%a = load i32, i32* %ptr
|
||||
%v = insertelement <16 x i32> undef, i32 %a, i32 0
|
||||
|
|
|
@ -397,7 +397,7 @@ define <4 x float> @PR31296(i8* %in) {
|
|||
; X32-LABEL: PR31296:
|
||||
; X32: # BB#0: # %entry
|
||||
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; X32-NEXT: vmovaps {{.*#+}} xmm1 = <0,1,u,u>
|
||||
; X32-NEXT: vpermil2ps {{.*#+}} xmm0 = xmm0[0],xmm1[0,0,1]
|
||||
; X32-NEXT: retl
|
||||
|
|
|
@ -1782,13 +1782,13 @@ define <4 x i32> @combine_test21(<8 x i32> %a, <4 x i32>* %ptr) {
|
|||
define <8 x float> @combine_test22(<2 x float>* %a, <2 x float>* %b) {
|
||||
; SSE-LABEL: combine_test22:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: combine_test22:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
|
||||
; AVX-NEXT: retq
|
||||
; Current AVX2 lowering of this is still awful, not adding a test case.
|
||||
|
@ -2818,13 +2818,13 @@ define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>*
|
|||
; SSE-LABEL: combine_scalar_load_with_blend_with_zero:
|
||||
; SSE: # BB#0:
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movapd %xmm0, (%rsi)
|
||||
; SSE-NEXT: movaps %xmm0, (%rsi)
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
|
||||
; AVX: # BB#0:
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovapd %xmm0, (%rsi)
|
||||
; AVX-NEXT: vmovaps %xmm0, (%rsi)
|
||||
; AVX-NEXT: retq
|
||||
%1 = load double, double* %a0, align 8
|
||||
%2 = insertelement <2 x double> undef, double %1, i32 0
|
||||
|
|
|
@ -8,7 +8,7 @@ define void @test0(<1 x i64>* %x) {
|
|||
; X32-LABEL: test0:
|
||||
; X32: ## BB#0: ## %entry
|
||||
; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
|
||||
; X32-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
|
||||
; X32-NEXT: movq %xmm0, (%eax)
|
||||
; X32-NEXT: retl
|
||||
|
|
|
@ -250,9 +250,9 @@ define <8 x float> @var_shuffle_v8f32_v8f32_xxxxxxxx_i32(<8 x float> %x, i32 %i0
|
|||
; AVX2-NEXT: vpermps %ymm0, %ymm5, %ymm5
|
||||
; AVX2-NEXT: vmovd %r9d, %xmm6
|
||||
; AVX2-NEXT: vpermps %ymm0, %ymm6, %ymm6
|
||||
; AVX2-NEXT: vmovd {{.*#+}} xmm7 = mem[0],zero,zero,zero
|
||||
; AVX2-NEXT: vmovss {{.*#+}} xmm7 = mem[0],zero,zero,zero
|
||||
; AVX2-NEXT: vpermps %ymm0, %ymm7, %ymm7
|
||||
; AVX2-NEXT: vmovd {{.*#+}} xmm8 = mem[0],zero,zero,zero
|
||||
; AVX2-NEXT: vmovss {{.*#+}} xmm8 = mem[0],zero,zero,zero
|
||||
; AVX2-NEXT: vpermps %ymm0, %ymm8, %ymm0
|
||||
; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[2,3]
|
||||
; AVX2-NEXT: vinsertps {{.*#+}} xmm5 = xmm5[0,1],xmm7[0],xmm5[3]
|
||||
|
|
|
@ -8,12 +8,12 @@
|
|||
define <4 x i32> @load_zmov_4i32_to_0zzz(<4 x i32> *%ptr) {
|
||||
; SSE-LABEL: load_zmov_4i32_to_0zzz:
|
||||
; SSE: # BB#0: # %entry
|
||||
; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: load_zmov_4i32_to_0zzz:
|
||||
; AVX: # BB#0: # %entry
|
||||
; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
||||
; AVX-NEXT: retq
|
||||
|
||||
entry:
|
||||
|
@ -25,12 +25,12 @@ entry:
|
|||
define <2 x i64> @load_zmov_2i64_to_0z(<2 x i64> *%ptr) {
|
||||
; SSE-LABEL: load_zmov_2i64_to_0z:
|
||||
; SSE: # BB#0: # %entry
|
||||
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; SSE-NEXT: retq
|
||||
;
|
||||
; AVX-LABEL: load_zmov_2i64_to_0z:
|
||||
; AVX: # BB#0: # %entry
|
||||
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
|
||||
; AVX-NEXT: retq
|
||||
|
||||
entry:
|
||||
|
|
|
@@ -195,8 +195,8 @@ define void @add4i16(%i16vec4* nocapture sret %ret, %i16vec4* %ap, %i16vec4* %bp
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
 ; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
-; X86-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
+; X86-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
+; X86-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
 ; X86-NEXT: paddw %xmm0, %xmm1
 ; X86-NEXT: movq %xmm1, (%eax)
 ; X86-NEXT: retl $4