[X86] Regenerate 32-bit merge-consecutive-loads tests

Avoid use of the X32 check prefix - we try to use that prefix only for gnux32 triple tests
Simon Pilgrim 2020-12-02 16:10:50 +00:00
parent 0dab7ecc5d
commit 793192da7f
3 changed files with 817 additions and 817 deletions
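Background on the check-prefix convention mentioned above (a hedged sketch, not part of the commit itself): in X86 codegen tests, the X32 FileCheck prefix is intended for runs targeting the x32 ILP32 ABI (a gnux32 triple), while ordinary 32-bit i686 runs should use an X86 prefix. Assuming the usual llc/FileCheck setup, the two kinds of RUN lines look roughly like this:

; 32-bit i686 run -> X86 prefix
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX
; x32 ABI run (ILP32 on x86-64) -> the X32 prefix is reserved for this case
; RUN: llc < %s -mtriple=x86_64-linux-gnux32 -mattr=+avx | FileCheck %s --check-prefix=X32-AVX

The CHECK lines in diffs like the one below are normally regenerated mechanically (e.g. by running llvm/utils/update_llc_test_checks.py on the test file) rather than edited by hand, which is why every X32-prefixed check line is replaced wholesale by its X86-prefixed equivalent.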

File diff suppressed because it is too large

View File

@ -4,7 +4,7 @@
; RUN: llc < %s -disable-peephole -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
;
; Just one 32-bit run to make sure we do reasonable things.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32-AVX
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X86-AVX
define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; AVX-LABEL: merge_4f64_2f64_23:
@ -12,11 +12,11 @@ define <4 x double> @merge_4f64_2f64_23(<2 x double>* %ptr) nounwind uwtable noi
; AVX-NEXT: vmovups 32(%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_2f64_23:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 32(%eax), %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_2f64_23:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 32(%eax), %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 3
%val0 = load <2 x double>, <2 x double>* %ptr0
@ -31,11 +31,11 @@ define <4 x double> @merge_4f64_2f64_2z(<2 x double>* %ptr) nounwind uwtable noi
; AVX-NEXT: vmovaps 32(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_2f64_2z:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps 32(%eax), %xmm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_2f64_2z:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovaps 32(%eax), %xmm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
%val0 = load <2 x double>, <2 x double>* %ptr0
%res = shufflevector <2 x double> %val0, <2 x double> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@ -48,11 +48,11 @@ define <4 x double> @merge_4f64_f64_2345(double* %ptr) nounwind uwtable noinline
; AVX-NEXT: vmovups 16(%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_2345:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 16(%eax), %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_f64_2345:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 16(%eax), %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 2
%ptr1 = getelementptr inbounds double, double* %ptr, i64 3
%ptr2 = getelementptr inbounds double, double* %ptr, i64 4
@ -74,11 +74,11 @@ define <4 x double> @merge_4f64_f64_3zuu(double* %ptr) nounwind uwtable noinline
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_3zuu:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_f64_3zuu:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 3
%val0 = load double, double* %ptr0
%res0 = insertelement <4 x double> undef, double %val0, i32 0
@ -92,11 +92,11 @@ define <4 x double> @merge_4f64_f64_34uu(double* %ptr) nounwind uwtable noinline
; AVX-NEXT: vmovups 24(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34uu:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 24(%eax), %xmm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_f64_34uu:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 24(%eax), %xmm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 3
%ptr1 = getelementptr inbounds double, double* %ptr, i64 4
%val0 = load double, double* %ptr0
@ -112,11 +112,11 @@ define <4 x double> @merge_4f64_f64_45zz(double* %ptr) nounwind uwtable noinline
; AVX-NEXT: vmovups 32(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_45zz:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 32(%eax), %xmm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_f64_45zz:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 32(%eax), %xmm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 4
%ptr1 = getelementptr inbounds double, double* %ptr, i64 5
%val0 = load double, double* %ptr0
@ -133,12 +133,12 @@ define <4 x double> @merge_4f64_f64_34z6(double* %ptr) nounwind uwtable noinline
; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34z6:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_f64_34z6:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 3
%ptr1 = getelementptr inbounds double, double* %ptr, i64 4
%ptr3 = getelementptr inbounds double, double* %ptr, i64 6
@ -158,11 +158,11 @@ define <4 x i64> @merge_4i64_2i64_3z(<2 x i64>* %ptr) nounwind uwtable noinline
; AVX-NEXT: vmovaps 48(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_2i64_3z:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovaps 48(%eax), %xmm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4i64_2i64_3z:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovaps 48(%eax), %xmm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds <2 x i64>, <2 x i64>* %ptr, i64 3
%val0 = load <2 x i64>, <2 x i64>* %ptr0
%res = shufflevector <2 x i64> %val0, <2 x i64> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@ -175,11 +175,11 @@ define <4 x i64> @merge_4i64_i64_1234(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-NEXT: vmovups 8(%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_i64_1234:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 8(%eax), %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4i64_i64_1234:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 8(%eax), %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%ptr1 = getelementptr inbounds i64, i64* %ptr, i64 2
%ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
@ -201,11 +201,11 @@ define <4 x i64> @merge_4i64_i64_1zzu(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_i64_1zzu:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4i64_i64_1zzu:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%val0 = load i64, i64* %ptr0
%res0 = insertelement <4 x i64> undef, i64 %val0, i32 0
@ -220,11 +220,11 @@ define <4 x i64> @merge_4i64_i64_23zz(i64* %ptr) nounwind uwtable noinline ssp {
; AVX-NEXT: vmovups 16(%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4i64_i64_23zz:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 16(%eax), %xmm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4i64_i64_23zz:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 16(%eax), %xmm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 2
%ptr1 = getelementptr inbounds i64, i64* %ptr, i64 3
%val0 = load i64, i64* %ptr0
@ -243,14 +243,14 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_2f32_23z5:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups 16(%eax), %xmm0
; X32-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX-NEXT: vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8f32_2f32_23z5:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups 16(%eax), %xmm0
; X86-AVX-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX-NEXT: vmovhps {{.*#+}} xmm1 = xmm1[0,1],mem[0,1]
; X86-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
%ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
%ptr3 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 5
@ -270,12 +270,12 @@ define <8 x float> @merge_8f32_4f32_z2(<4 x float>* %ptr) nounwind uwtable noinl
; AVX-NEXT: vinsertf128 $1, 32(%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_4f32_z2:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vinsertf128 $1, 32(%eax), %ymm0, %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8f32_4f32_z2:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: vinsertf128 $1, 32(%eax), %ymm0, %ymm0
; X86-AVX-NEXT: retl
%ptr1 = getelementptr inbounds <4 x float>, <4 x float>* %ptr, i64 2
%val1 = load <4 x float>, <4 x float>* %ptr1
%res = shufflevector <4 x float> zeroinitializer, <4 x float> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -288,11 +288,11 @@ define <8 x float> @merge_8f32_f32_12zzuuzz(float* %ptr) nounwind uwtable noinli
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_f32_12zzuuzz:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8f32_f32_12zzuuzz:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 1
%ptr1 = getelementptr inbounds float, float* %ptr, i64 2
%val0 = load float, float* %ptr0
@ -313,12 +313,12 @@ define <8 x float> @merge_8f32_f32_1u3u5zu8(float* %ptr) nounwind uwtable noinli
; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8f32_f32_1u3u5zu8:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 1
%ptr2 = getelementptr inbounds float, float* %ptr, i64 3
%ptr4 = getelementptr inbounds float, float* %ptr, i64 5
@ -342,12 +342,12 @@ define <8 x i32> @merge_8i32_4i32_z3(<4 x i32>* %ptr) nounwind uwtable noinline
; AVX-NEXT: vinsertf128 $1, 48(%rdi), %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8i32_4i32_z3:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vinsertf128 $1, 48(%eax), %ymm0, %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8i32_4i32_z3:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: vinsertf128 $1, 48(%eax), %ymm0, %ymm0
; X86-AVX-NEXT: retl
%ptr1 = getelementptr inbounds <4 x i32>, <4 x i32>* %ptr, i64 3
%val1 = load <4 x i32>, <4 x i32>* %ptr1
%res = shufflevector <4 x i32> zeroinitializer, <4 x i32> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -362,13 +362,13 @@ define <8 x i32> @merge_8i32_i32_56zz9uzz(i32* %ptr) nounwind uwtable noinline s
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8i32_i32_56zz9uzz:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8i32_i32_56zz9uzz:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-AVX-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 5
%ptr1 = getelementptr inbounds i32, i32* %ptr, i64 6
%ptr4 = getelementptr inbounds i32, i32* %ptr, i64 9
@ -392,12 +392,12 @@ define <8 x i32> @merge_8i32_i32_1u3u5zu8(i32* %ptr) nounwind uwtable noinline s
; AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_8i32_i32_1u3u5zu8:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: vblendps {{.*#+}} ymm0 = mem[0,1,2,3,4],ymm0[5],mem[6,7]
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
%ptr2 = getelementptr inbounds i32, i32* %ptr, i64 3
%ptr4 = getelementptr inbounds i32, i32* %ptr, i64 5
@ -420,11 +420,11 @@ define <16 x i16> @merge_16i16_i16_89zzzuuuuuuuuuuuz(i16* %ptr) nounwind uwtable
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_16i16_i16_89zzzuuuuuuuuuuuz:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 8
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 9
%val0 = load i16, i16* %ptr0
@ -444,11 +444,11 @@ define <16 x i16> @merge_16i16_i16_45u7uuuuuuuuuuuu(i16* %ptr) nounwind uwtable
; AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_16i16_i16_45u7uuuuuuuuuuuu:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
@ -467,11 +467,11 @@ define <16 x i16> @merge_16i16_i16_0uu3uuuuuuuuCuEF(i16* %ptr) nounwind uwtable
; AVX-NEXT: vmovups (%rdi), %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups (%eax), %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_16i16_i16_0uu3uuuuuuuuCuEF:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups (%eax), %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
%ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@ -497,12 +497,12 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF(i16* %ptr) nounwind uwtable
; AVX-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovups (%eax), %ymm0
; X32-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovups (%eax), %ymm0
; X86-AVX-NEXT: vandps {{\.LCPI.*}}, %ymm0, %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
%ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@ -530,11 +530,11 @@ define <32 x i8> @merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i8* %ptr) nounw
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_32i8_i8_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 4
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 5
%ptr3 = getelementptr inbounds i8, i8* %ptr, i64 7
@ -553,11 +553,11 @@ define <32 x i8> @merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu(i8* %ptr) nounw
; AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_32i8_i8_23u5uuuuuuuuuuzzzzuuuuuuuuuuuuuu:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 2
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 3
%ptr3 = getelementptr inbounds i8, i8* %ptr, i64 5
@ -585,12 +585,12 @@ define <4 x double> @merge_4f64_f64_34uz_volatile(double* %ptr) nounwind uwtable
; AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; AVX-NEXT: retq
;
; X32-AVX-LABEL: merge_4f64_f64_34uz_volatile:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_4f64_f64_34uz_volatile:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 3
%ptr1 = getelementptr inbounds double, double* %ptr, i64 4
%val0 = load volatile double, double* %ptr0
@ -638,18 +638,18 @@ define <16 x i16> @merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile(i16* %ptr) nounwind
; AVX512F-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX512F-NEXT: retq
;
; X32-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: movzwl (%eax), %ecx
; X32-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $4, 24(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $6, 28(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vpinsrw $7, 30(%eax), %xmm0, %xmm0
; X32-AVX-NEXT: vmovd %ecx, %xmm1
; X32-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1
; X32-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: merge_16i16_i16_0uu3zzuuuuuzCuEF_volatile:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: movzwl (%eax), %ecx
; X86-AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X86-AVX-NEXT: vpinsrw $4, 24(%eax), %xmm0, %xmm0
; X86-AVX-NEXT: vpinsrw $6, 28(%eax), %xmm0, %xmm0
; X86-AVX-NEXT: vpinsrw $7, 30(%eax), %xmm0, %xmm0
; X86-AVX-NEXT: vmovd %ecx, %xmm1
; X86-AVX-NEXT: vpinsrw $3, 6(%eax), %xmm1, %xmm1
; X86-AVX-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 0
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 3
%ptrC = getelementptr inbounds i16, i16* %ptr, i64 12
@ -686,14 +686,14 @@ define <2 x i8> @PR42846(<2 x i8>* %j, <2 x i8> %k) {
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;
; X32-AVX-LABEL: PR42846:
; X32-AVX: # %bb.0:
; X32-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX-NEXT: vmovdqa l, %ymm0
; X32-AVX-NEXT: vpextrw $0, %xmm0, (%eax)
; X32-AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X32-AVX-NEXT: vzeroupper
; X32-AVX-NEXT: retl
; X86-AVX-LABEL: PR42846:
; X86-AVX: # %bb.0:
; X86-AVX-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX-NEXT: vmovdqa l, %ymm0
; X86-AVX-NEXT: vpextrw $0, %xmm0, (%eax)
; X86-AVX-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; X86-AVX-NEXT: vzeroupper
; X86-AVX-NEXT: retl
%t0 = load volatile <32 x i8>, <32 x i8>* @l, align 32
%shuffle = shufflevector <32 x i8> %t0, <32 x i8> undef, <2 x i32> <i32 0, i32 1>
store <2 x i8> %shuffle, <2 x i8>* %j, align 2

View File

@ -3,7 +3,7 @@
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefix=ALL
;
; Just one 32-bit run to make sure we do reasonable things.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X32-AVX512F
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=X86-AVX512F
define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable noinline ssp {
; ALL-LABEL: merge_8f64_2f64_12u4:
@ -13,13 +13,13 @@ define <8 x double> @merge_8f64_2f64_12u4(<2 x double>* %ptr) nounwind uwtable n
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_2f64_12u4:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 16(%eax), %ymm0
; X32-AVX512F-NEXT: vinsertf128 $1, 64(%eax), %ymm0, %ymm1
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_2f64_12u4:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 16(%eax), %ymm0
; X86-AVX512F-NEXT: vinsertf128 $1, 64(%eax), %ymm0, %ymm1
; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 1
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
%ptr3 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 4
@ -41,14 +41,14 @@ define <8 x double> @merge_8f64_2f64_23z5(<2 x double>* %ptr) nounwind uwtable n
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_2f64_23z5:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 32(%eax), %ymm0
; X32-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X32-AVX512F-NEXT: vinsertf128 $1, 80(%eax), %ymm1, %ymm1
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_2f64_23z5:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 32(%eax), %ymm0
; X86-AVX512F-NEXT: vxorps %xmm1, %xmm1, %xmm1
; X86-AVX512F-NEXT: vinsertf128 $1, 80(%eax), %ymm1, %ymm1
; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 2
%ptr1 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 3
%ptr3 = getelementptr inbounds <2 x double>, <2 x double>* %ptr, i64 5
@ -68,12 +68,12 @@ define <8 x double> @merge_8f64_4f64_z2(<4 x double>* %ptr) nounwind uwtable noi
; ALL-NEXT: vinsertf64x4 $1, 64(%rdi), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_4f64_z2:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX512F-NEXT: vinsertf64x4 $1, 64(%eax), %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_4f64_z2:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX512F-NEXT: vinsertf64x4 $1, 64(%eax), %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr1 = getelementptr inbounds <4 x double>, <4 x double>* %ptr, i64 2
%val1 = load <4 x double>, <4 x double>* %ptr1
%res = shufflevector <4 x double> zeroinitializer, <4 x double> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -86,11 +86,11 @@ define <8 x double> @merge_8f64_f64_23uuuuu9(double* %ptr) nounwind uwtable noin
; ALL-NEXT: vmovups 16(%rdi), %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 16(%eax), %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_f64_23uuuuu9:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 16(%eax), %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 2
%ptr1 = getelementptr inbounds double, double* %ptr, i64 3
%ptr7 = getelementptr inbounds double, double* %ptr, i64 9
@ -109,11 +109,11 @@ define <8 x double> @merge_8f64_f64_12zzuuzz(double* %ptr) nounwind uwtable noin
; ALL-NEXT: vmovups 8(%rdi), %xmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 8(%eax), %xmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_f64_12zzuuzz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 8(%eax), %xmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 1
%ptr1 = getelementptr inbounds double, double* %ptr, i64 2
%val0 = load double, double* %ptr0
@ -134,12 +134,12 @@ define <8 x double> @merge_8f64_f64_1u3u5zu8(double* %ptr) nounwind uwtable noin
; ALL-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X32-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_f64_1u3u5zu8:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X86-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 1
%ptr2 = getelementptr inbounds double, double* %ptr, i64 3
%ptr4 = getelementptr inbounds double, double* %ptr, i64 5
@ -163,12 +163,12 @@ define <8 x i64> @merge_8i64_4i64_z3(<4 x i64>* %ptr) nounwind uwtable noinline
; ALL-NEXT: vinsertf64x4 $1, 96(%rdi), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_4i64_z3:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X32-AVX512F-NEXT: vinsertf64x4 $1, 96(%eax), %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8i64_4i64_z3:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vxorps %xmm0, %xmm0, %xmm0
; X86-AVX512F-NEXT: vinsertf64x4 $1, 96(%eax), %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr1 = getelementptr inbounds <4 x i64>, <4 x i64>* %ptr, i64 3
%val1 = load <4 x i64>, <4 x i64>* %ptr1
%res = shufflevector <4 x i64> zeroinitializer, <4 x i64> %val1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@ -183,13 +183,13 @@ define <8 x i64> @merge_8i64_i64_56zz9uzz(i64* %ptr) nounwind uwtable noinline s
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 40(%eax), %xmm0
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8i64_i64_56zz9uzz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 40(%eax), %xmm0
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 5
%ptr1 = getelementptr inbounds i64, i64* %ptr, i64 6
%ptr4 = getelementptr inbounds i64, i64* %ptr, i64 9
@ -213,12 +213,12 @@ define <8 x i64> @merge_8i64_i64_1u3u5zu8(i64* %ptr) nounwind uwtable noinline s
; ALL-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X32-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8i64_i64_1u3u5zu8:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 8(%eax), %zmm0
; X86-AVX512F-NEXT: vpandq {{\.LCPI.*}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i64, i64* %ptr, i64 1
%ptr2 = getelementptr inbounds i64, i64* %ptr, i64 3
%ptr4 = getelementptr inbounds i64, i64* %ptr, i64 5
@ -241,11 +241,11 @@ define <16 x float> @merge_16f32_f32_89zzzuuuuuuuuuuuz(float* %ptr) nounwind uwt
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16f32_f32_89zzzuuuuuuuuuuuz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 8
%ptr1 = getelementptr inbounds float, float* %ptr, i64 9
%val0 = load float, float* %ptr0
@ -265,11 +265,11 @@ define <16 x float> @merge_16f32_f32_45u7uuuuuuuuuuuu(float* %ptr) nounwind uwta
; ALL-NEXT: vmovups 16(%rdi), %xmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 16(%eax), %xmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16f32_f32_45u7uuuuuuuuuuuu:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 16(%eax), %xmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 4
%ptr1 = getelementptr inbounds float, float* %ptr, i64 5
%ptr3 = getelementptr inbounds float, float* %ptr, i64 7
@ -288,11 +288,11 @@ define <16 x float> @merge_16f32_f32_0uu3uuuuuuuuCuEF(float* %ptr) nounwind uwta
; ALL-NEXT: vmovups (%rdi), %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups (%eax), %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16f32_f32_0uu3uuuuuuuuCuEF:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups (%eax), %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 0
%ptr3 = getelementptr inbounds float, float* %ptr, i64 3
%ptrC = getelementptr inbounds float, float* %ptr, i64 12
@ -320,14 +320,14 @@ define <16 x float> @merge_16f32_f32_0uu3zzuuuuuzCuEF(float* %ptr) nounwind uwta
; ALL-NEXT: vpermi2ps %zmm2, %zmm1, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups (%eax), %zmm1
; X32-AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X32-AVX512F-NEXT: vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
; X32-AVX512F-NEXT: vpermi2ps %zmm2, %zmm1, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16f32_f32_0uu3zzuuuuuzCuEF:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups (%eax), %zmm1
; X86-AVX512F-NEXT: vxorps %xmm2, %xmm2, %xmm2
; X86-AVX512F-NEXT: vmovaps {{.*#+}} zmm0 = <0,u,u,3,20,21,u,u,u,u,u,u,12,29,14,15>
; X86-AVX512F-NEXT: vpermi2ps %zmm2, %zmm1, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds float, float* %ptr, i64 0
%ptr3 = getelementptr inbounds float, float* %ptr, i64 3
%ptrC = getelementptr inbounds float, float* %ptr, i64 12
@ -355,11 +355,11 @@ define <16 x i32> @merge_16i32_i32_12zzzuuuuuuuuuuuz(i32* %ptr) nounwind uwtable
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16i32_i32_12zzzuuuuuuuuuuuz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 1
%ptr1 = getelementptr inbounds i32, i32* %ptr, i64 2
%val0 = load i32, i32* %ptr0
@ -379,11 +379,11 @@ define <16 x i32> @merge_16i32_i32_23u5uuuuuuuuuuuu(i32* %ptr) nounwind uwtable
; ALL-NEXT: vmovups 8(%rdi), %xmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups 8(%eax), %xmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16i32_i32_23u5uuuuuuuuuuuu:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups 8(%eax), %xmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 2
%ptr1 = getelementptr inbounds i32, i32* %ptr, i64 3
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 5
@ -402,11 +402,11 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF(i32* %ptr) nounwind uwtable
; ALL-NEXT: vmovups (%rdi), %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovups (%eax), %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovups (%eax), %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
%ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
@ -432,12 +432,12 @@ define <16 x i32> @merge_16i32_i32_0uu3zzuuuuuzCuEF(i32* %ptr) nounwind uwtable
; ALL-NEXT: vpandd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovdqu64 (%eax), %zmm0
; X32-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16i32_i32_0uu3zzuuuuuzCuEF:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovdqu64 (%eax), %zmm0
; X86-AVX512F-NEXT: vpandd {{\.LCPI.*}}, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
%ptrC = getelementptr inbounds i32, i32* %ptr, i64 12
@ -465,11 +465,11 @@ define <32 x i16> @merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz(i16* %ptr) n
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_32i16_i16_12u4uuuuuuuuuuuuuuuuuuuuuuuuuuzz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 1
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 2
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 4
@ -490,11 +490,11 @@ define <32 x i16> @merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu(i16* %ptr) n
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_32i16_i16_45u7uuuuuuuuuuuuuuuuuuuuuuuuuuuu:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 4
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 5
%ptr3 = getelementptr inbounds i16, i16* %ptr, i64 7
@ -513,11 +513,11 @@ define <32 x i16> @merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu(i16* %ptr) n
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_32i16_i16_23uzuuuuuuuuuuzzzzuuuuuuuuuuuuuu:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i16, i16* %ptr, i64 2
%ptr1 = getelementptr inbounds i16, i16* %ptr, i64 3
%val0 = load i16, i16* %ptr0
@ -538,11 +538,11 @@ define <64 x i8> @merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
; ALL-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_64i8_i8_12u4uuu8uuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 2
%ptr3 = getelementptr inbounds i8, i8* %ptr, i64 4
@ -569,11 +569,11 @@ define <64 x i8> @merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuu
; ALL-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_64i8_i8_12u4uuuuuuuuuuzzzzuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuuz:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i8, i8* %ptr, i64 1
%ptr1 = getelementptr inbounds i8, i8* %ptr, i64 2
%ptr3 = getelementptr inbounds i8, i8* %ptr, i64 4
@ -604,14 +604,14 @@ define <8 x double> @merge_8f64_f64_23uuuuu9_volatile(double* %ptr) nounwind uwt
; ALL-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX512F-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; X32-AVX512F-NEXT: vbroadcastsd 72(%eax), %ymm1
; X32-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_8f64_f64_23uuuuu9_volatile:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X86-AVX512F-NEXT: vmovhps {{.*#+}} xmm0 = xmm0[0,1],mem[0,1]
; X86-AVX512F-NEXT: vbroadcastsd 72(%eax), %ymm1
; X86-AVX512F-NEXT: vinsertf64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds double, double* %ptr, i64 2
%ptr1 = getelementptr inbounds double, double* %ptr, i64 3
%ptr7 = getelementptr inbounds double, double* %ptr, i64 9
@ -636,17 +636,17 @@ define <16 x i32> @merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile(i32* %ptr) nounwind
; ALL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; ALL-NEXT: retq
;
; X32-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
; X32-AVX512F: # %bb.0:
; X32-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vpinsrd $3, 12(%eax), %xmm0, %xmm0
; X32-AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-AVX512F-NEXT: vpinsrd $2, 56(%eax), %xmm1, %xmm1
; X32-AVX512F-NEXT: vpinsrd $3, 60(%eax), %xmm1, %xmm1
; X32-AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; X32-AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; X32-AVX512F-NEXT: retl
; X86-AVX512F-LABEL: merge_16i32_i32_0uu3uuuuuuuuCuEF_volatile:
; X86-AVX512F: # %bb.0:
; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX512F-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; X86-AVX512F-NEXT: vpinsrd $3, 12(%eax), %xmm0, %xmm0
; X86-AVX512F-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X86-AVX512F-NEXT: vpinsrd $2, 56(%eax), %xmm1, %xmm1
; X86-AVX512F-NEXT: vpinsrd $3, 60(%eax), %xmm1, %xmm1
; X86-AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; X86-AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; X86-AVX512F-NEXT: retl
%ptr0 = getelementptr inbounds i32, i32* %ptr, i64 0
%ptr3 = getelementptr inbounds i32, i32* %ptr, i64 3
%ptrC = getelementptr inbounds i32, i32* %ptr, i64 12