From ef8f61bce6687c86e2abfcb943a7f5f29ec1911a Mon Sep 17 00:00:00 2001 From: Ayman Musa Date: Sun, 3 Sep 2017 13:53:44 +0000 Subject: [PATCH] [X86][AVX512] Add simple tests for all AVX512 shuffle instructions. Throughout an effort to strongly check the behavior of CodeGen with the IR shufflevector instruction we generated many tests while predicting the best X86 sequence that may be generated. This is a subset of the generated tests that we think may add value to our X86 set of tests. Some of the checks are not optimal and will be changed after fixing: 1. PR34394 2. PR34382 3. PR34380 4. PR34359 Differential Revision: https://reviews.llvm.org/D37329 llvm-svn: 312442 --- .../avx512-shuffles/broadcast-scalar-fp.ll | 1158 +++++ .../avx512-shuffles/broadcast-scalar-int.ll | 2623 ++++++++++ .../avx512-shuffles/broadcast-vector-fp.ll | 1101 ++++ .../avx512-shuffles/broadcast-vector-int.ll | 1343 +++++ .../X86/avx512-shuffles/duplicate-high.ll | 789 +++ .../X86/avx512-shuffles/duplicate-low.ll | 1428 ++++++ .../X86/avx512-shuffles/in_lane_permute.ll | 1756 +++++++ .../X86/avx512-shuffles/partial_permute.ll | 4556 +++++++++++++++++ .../CodeGen/X86/avx512-shuffles/permute.ll | 2937 +++++++++++ .../X86/avx512-shuffles/shuffle-interleave.ll | 1312 +++++ .../X86/avx512-shuffles/shuffle-vec.ll | 1941 +++++++ .../CodeGen/X86/avx512-shuffles/shuffle.ll | 2792 ++++++++++ .../CodeGen/X86/avx512-shuffles/unpack.ll | 2621 ++++++++++ 13 files changed, 26357 insertions(+) create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll create mode 100644 
llvm/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/permute.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/shuffle.ll create mode 100644 llvm/test/CodeGen/X86/avx512-shuffles/unpack.ll diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll new file mode 100644 index 000000000000..46349931bf15 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-fp.ll @@ -0,0 +1,1158 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <4 x double> @test_double_to_4(double %s) { +; CHECK-LABEL: test_double_to_4: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %res = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mask0(double %s, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mask0(double %s) { +; CHECK-LABEL: test_masked_z_double_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd 
%xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mask1(double %s, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mask1(double %s) { +; CHECK-LABEL: test_masked_z_double_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mask2(double %s, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mask2(double %s) { +; CHECK-LABEL: test_masked_z_double_to_4_mask2: +; CHECK: 
# BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mask3(double %s, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mask3(double %s) { +; CHECK-LABEL: test_masked_z_double_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <8 x double> @test_double_to_8(double %s) { +; CHECK-LABEL: test_double_to_8: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %res = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mask0(double %s, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 
{%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mask0(double %s) { +; CHECK-LABEL: test_masked_z_double_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mask1(double %s, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $103, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mask1(double %s) { +; CHECK-LABEL: test_masked_z_double_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $103, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mask2(double %s, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mask2: +; CHECK: # BB#0: 
+; CHECK-NEXT: movb $-56, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mask2(double %s) { +; CHECK-LABEL: test_masked_z_double_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-56, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mask3(double %s, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $78, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mask3(double %s) { +; CHECK-LABEL: test_masked_z_double_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $78, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <4 x float> @test_float_to_4(float %s) { +; 
CHECK-LABEL: test_float_to_4: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %res = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mask0(float %s, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mask0(float %s) { +; CHECK-LABEL: test_masked_z_float_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mask1(float %s, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mask1(float %s) { +; CHECK-LABEL: test_masked_z_float_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mask2(float %s, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mask2(float %s) { +; CHECK-LABEL: test_masked_z_float_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mask3(float %s, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mask3(float %s) { +; CHECK-LABEL: test_masked_z_float_to_4_mask3: +; CHECK: # BB#0: 
+; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <8 x float> @test_float_to_8(float %s) { +; CHECK-LABEL: test_float_to_8: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %res = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mask0(float %s, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $72, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mask0(float %s) { +; CHECK-LABEL: test_masked_z_float_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $72, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mask1(float %s, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; 
CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mask1(float %s) { +; CHECK-LABEL: test_masked_z_float_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mask2(float %s, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mask2(float %s) { +; CHECK-LABEL: test_masked_z_float_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mask3(float %s, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss 
%xmm0, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mask3(float %s) { +; CHECK-LABEL: test_masked_z_float_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_float_to_16(float %s) { +; CHECK-LABEL: test_float_to_16: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %res = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mask0(float %s, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5916, %ax # imm = 0xE8E4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mask0(float %s) { +; CHECK-LABEL: test_masked_z_float_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5916, %ax # imm = 0xE8E4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, 
float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mask1(float %s, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-1130, %ax # imm = 0xFB96 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mask1(float %s) { +; CHECK-LABEL: test_masked_z_float_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-1130, %ax # imm = 0xFB96 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mask2(float %s, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12439, %ax # imm = 0xCF69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mask2(float %s) { +; CHECK-LABEL: test_masked_z_float_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12439, %ax 
# imm = 0xCF69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mask3(float %s, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6413, %ax # imm = 0xE6F3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mask3(float %s) { +; CHECK-LABEL: test_masked_z_float_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6413, %ax # imm = 0xE6F3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss %xmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <4 x double> @test_double_to_4_mem(double* %p) { +; CHECK-LABEL: test_double_to_4_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %res = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mem_mask0(double* %p, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mem_mask0(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mem_mask1(double* %p, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mem_mask1(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x 
double> @test_masked_double_to_4_mem_mask2(double* %p, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mem_mask2(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_double_to_4_mem_mask3(double* %p, <4 x double> %default) { +; CHECK-LABEL: test_masked_double_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_double_to_4_mem_mask3(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + 
%shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <8 x double> @test_double_to_8_mem(double* %p) { +; CHECK-LABEL: test_double_to_8_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %res = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mem_mask0(double* %p, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $120, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mem_mask0(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $120, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mem_mask1(double* %p, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, 
double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mem_mask1(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mem_mask2(double* %p, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $111, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mem_mask2(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $111, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_double_to_8_mem_mask3(double* %p, <8 x double> %default) { +; CHECK-LABEL: test_masked_double_to_8_mem_mask3: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $-100, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_double_to_8_mem_mask3(double* %p) { +; CHECK-LABEL: test_masked_z_double_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-100, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load double, double* %p + %vec = insertelement <2 x double> undef, double %s, i32 0 + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <4 x float> @test_float_to_4_mem(float* %p) { +; CHECK-LABEL: test_float_to_4_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %res = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mem_mask0(float* %p, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mem_mask0(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mem_mask1(float* %p, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mem_mask1(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mem_mask2(float* %p, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> 
@test_masked_z_float_to_4_mem_mask2(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_float_to_4_mem_mask3(float* %p, <4 x float> %default) { +; CHECK-LABEL: test_masked_float_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %default + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_float_to_4_mem_mask3(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <8 x float> @test_float_to_8_mem(float* %p) { +; CHECK-LABEL: test_float_to_8_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %res = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mem_mask0(float* %p, <8 x 
float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mem_mask0(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mem_mask1(float* %p, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-51, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mem_mask1(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-51, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x 
float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mem_mask2(float* %p, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mem_mask2(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_float_to_8_mem_mask3(float* %p, <8 x float> %default) { +; CHECK-LABEL: test_masked_float_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_float_to_8_mem_mask3(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement 
<2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_float_to_16_mem(float* %p) { +; CHECK-LABEL: test_float_to_16_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %res = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mem_mask0(float* %p, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-18370, %ax # imm = 0xB83E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mem_mask0(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-18370, %ax # imm = 0xB83E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mem_mask1(float* %p, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26137, %ax # imm = 0x6619 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s 
= load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mem_mask1(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26137, %ax # imm = 0x6619 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mem_mask2(float* %p, <16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-11480, %ax # imm = 0xD328 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mem_mask2(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-11480, %ax # imm = 0xD328 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_float_to_16_mem_mask3(float* %p, 
<16 x float> %default) { +; CHECK-LABEL: test_masked_float_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-21749, %ax # imm = 0xAB0B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_float_to_16_mem_mask3(float* %p) { +; CHECK-LABEL: test_masked_z_float_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-21749, %ax # imm = 0xAB0B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load float, float* %p + %vec = insertelement <2 x float> undef, float %s, i32 0 + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll new file mode 100644 index 000000000000..f0bbcba55987 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-scalar-int.ll @@ -0,0 +1,2623 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <16 x i8> @test_i8_to_16(i8 %s) { +; CHECK-LABEL: test_i8_to_16: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %res = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mask0(i8 %s, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6675, %ax # imm = 0xE5ED +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: 
vpbroadcastb %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mask0(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6675, %ax # imm = 0xE5ED +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mask1(i8 %s, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5042, %ax # imm = 0xEC4E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mask1(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5042, %ax # imm = 0xEC4E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mask2(i8 %s, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30108, %ax # imm = 0x8A64 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + 
%vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mask2(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30108, %ax # imm = 0x8A64 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mask3(i8 %s, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $25644, %ax # imm = 0x642C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mask3(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $25644, %ax # imm = 0x642C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <32 x i8> @test_i8_to_32(i8 %s) { +; CHECK-LABEL: test_i8_to_32: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %res = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + ret <32 x i8> %res +} +define <32 x i8> 
@test_masked_i8_to_32_mask0(i8 %s, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1265798160, %eax # imm = 0xB48D73F0 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mask0(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1265798160, %eax # imm = 0xB48D73F0 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_i8_to_32_mask1(i8 %s, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1183839537, %eax # imm = 0x468FF531 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mask1(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1183839537, %eax # imm = 0x468FF531 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> 
@test_masked_i8_to_32_mask2(i8 %s, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-809048538, %eax # imm = 0xCFC6E626 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mask2(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-809048538, %eax # imm = 0xCFC6E626 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_i8_to_32_mask3(i8 %s, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-646452858, %eax # imm = 0xD977E986 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mask3(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-646452858, %eax # imm = 0xD977E986 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <64 x i8> @test_i8_to_64(i8 
%s) { +; CHECK-LABEL: test_i8_to_64: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %res = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mask0(i8 %s, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $4127638692029284353, %rax # imm = 0x394851856F904001 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mask0(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_64_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $4127638692029284353, %rax # imm = 0x394851856F904001 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mask1(i8 %s, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $2719977871742575617, %rax # imm = 0x25BF4D769A23A401 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mask1(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_64_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $2719977871742575617, %rax # imm = 
0x25BF4D769A23A401 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mask2(i8 %s, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $4380017386678030849, %rax # imm = 0x3CC8F29B5AFA9201 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mask2(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_64_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $4380017386678030849, %rax # imm = 0x3CC8F29B5AFA9201 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mask3(i8 %s, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $2673371376007625217, %rax # imm = 0x2519B91A33A1BA01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mask3(i8 %s) { +; CHECK-LABEL: test_masked_z_i8_to_64_mask3: +; CHECK: # 
BB#0: +; CHECK-NEXT: movabsq $2673371376007625217, %rax # imm = 0x2519B91A33A1BA01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <8 x i16> @test_i16_to_8(i16 %s) { +; CHECK-LABEL: test_i16_to_8: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %res = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_i16_to_8_mask0(i16 %s, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mask0(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_i16_to_8_mask1(i16 %s, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-88, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x 
i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mask1(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-88, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_i16_to_8_mask2(i16 %s, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mask2(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_i16_to_8_mask3(i16 %s, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-23, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + 
+define <8 x i16> @test_masked_z_i16_to_8_mask3(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-23, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <16 x i16> @test_i16_to_16(i16 %s) { +; CHECK-LABEL: test_i16_to_16: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %res = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mask0(i16 %s, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-13546, %ax # imm = 0xCB16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mask0(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-13546, %ax # imm = 0xCB16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mask1(i16 %s, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5399, %ax # imm = 0x1517 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: 
vpbroadcastw %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mask1(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5399, %ax # imm = 0x1517 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mask2(i16 %s, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-25377, %ax # imm = 0x9CDF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mask2(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-25377, %ax # imm = 0x9CDF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mask3(i16 %s, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31879, %ax # imm = 0x7C87 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: 
vpbroadcastw %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mask3(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31879, %ax # imm = 0x7C87 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <32 x i16> @test_i16_to_32(i16 %s) { +; CHECK-LABEL: test_i16_to_32: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %res = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mask0(i16 %s, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1588505078, %eax # imm = 0xA151560A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mask0(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1588505078, %eax # imm = 0xA151560A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x 
i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mask1(i16 %s, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-665386747, %eax # imm = 0xD8570105 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mask1(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-665386747, %eax # imm = 0xD8570105 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mask2(i16 %s, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1043830049, %eax # imm = 0x3E379521 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mask2(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1043830049, %eax # imm = 0x3E379521 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 
x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mask3(i16 %s, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1153245016, %eax # imm = 0xBB42E0A8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mask3(i16 %s) { +; CHECK-LABEL: test_masked_z_i16_to_32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1153245016, %eax # imm = 0xBB42E0A8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <4 x i32> @test_i32_to_4(i32 %s) { +; CHECK-LABEL: test_i32_to_4: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mask0(i32 %s, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mask0(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_4_mask0: +; 
CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mask1(i32 %s, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mask1(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mask2(i32 %s, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mask2(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} 
{z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mask3(i32 %s, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mask3(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <8 x i32> @test_i32_to_8(i32 %s) { +; CHECK-LABEL: test_i32_to_8: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mask0(i32 %s, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-48, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> 
@test_masked_z_i32_to_8_mask0(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-48, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mask1(i32 %s, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mask1(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mask2(i32 %s, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $38, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mask2(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_8_mask2: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $38, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mask3(i32 %s, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-78, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mask3(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-78, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <16 x i32> @test_i32_to_16(i32 %s) { +; CHECK-LABEL: test_i32_to_16: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mask0(i32 %s, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $2965, %ax # imm = 0xB95 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res 
= select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mask0(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_16_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $2965, %ax # imm = 0xB95 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mask1(i32 %s, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-27928, %ax # imm = 0x92E8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mask1(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_16_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-27928, %ax # imm = 0x92E8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mask2(i32 %s, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5899, %ax # imm = 0xE8F5 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select 
<16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mask2(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_16_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5899, %ax # imm = 0xE8F5 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mask3(i32 %s, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30527, %ax # imm = 0x88C1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mask3(i32 %s) { +; CHECK-LABEL: test_masked_z_i32_to_16_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30527, %ax # imm = 0x88C1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd %edi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <2 x i64> @test_i64_to_2(i64 %s) { +; CHECK-LABEL: test_i64_to_2: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + ret <2 x i64> %res +} +define <2 x i64> @test_masked_i64_to_2_mask0(i64 %s, <2 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_2_mask0: +; CHECK: # BB#0: 
+; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %default + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_i64_to_2_mask0(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_2_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <2 x i64> @test_masked_i64_to_2_mask1(i64 %s, <2 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_2_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %default + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_i64_to_2_mask1(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_2_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <4 x i64> @test_i64_to_4(i64 %s) { +; CHECK-LABEL: test_i64_to_4: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %res = shufflevector <2 x i64> %vec, <2 x i64> 
undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mask0(i64 %s, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mask0(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_4_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mask1(i64 %s, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mask1(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_4_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mask2(i64 %s, <4 x i64> %default) { +; 
CHECK-LABEL: test_masked_i64_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mask2(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_4_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mask3(i64 %s, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mask3(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_4_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <8 x i64> @test_i64_to_8(i64 %s) { +; CHECK-LABEL: test_i64_to_8: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, 
i32 0 + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mask0(i64 %s, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mask0(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_8_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mask1(i64 %s, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mask1(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_8_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> 
@test_masked_i64_to_8_mask2(i64 %s, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mask2(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_8_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mask3(i64 %s, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mask3(i64 %s) { +; CHECK-LABEL: test_masked_z_i64_to_8_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq %rdi, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <16 x i8> @test_i8_to_16_mem(i8* %p) { +; CHECK-LABEL: test_i8_to_16_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastb (%rdi), 
%xmm0 +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %res = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mem_mask0(i8* %p, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-2555, %ax # imm = 0xF605 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mem_mask0(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-2555, %ax # imm = 0xF605 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mem_mask1(i8* %p, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12122, %ax # imm = 0x2F5A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mem_mask1(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12122, %ax # imm = 0x2F5A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), 
%xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mem_mask2(i8* %p, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $8120, %ax # imm = 0x1FB8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mem_mask2(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $8120, %ax # imm = 0x1FB8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_i8_to_16_mem_mask3(i8* %p, <16 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $13800, %ax # imm = 0x35E8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %default + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_i8_to_16_mem_mask3(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $13800, %ax # imm 
= 0x35E8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <32 x i8> @test_i8_to_32_mem(i8* %p) { +; CHECK-LABEL: test_i8_to_32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %res = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + ret <32 x i8> %res +} +define <32 x i8> @test_masked_i8_to_32_mem_mask0(i8* %p, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-855786879, %eax # imm = 0xCCFDBA81 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mem_mask0(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-855786879, %eax # imm = 0xCCFDBA81 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_i8_to_32_mem_mask1(i8* %p, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-270715404, %eax # imm = 0xEFDD35F4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb 
(%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mem_mask1(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-270715404, %eax # imm = 0xEFDD35F4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_i8_to_32_mem_mask2(i8* %p, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $97850418, %eax # imm = 0x5D51432 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mem_mask2(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $97850418, %eax # imm = 0x5D51432 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_i8_to_32_mem_mask3(i8* %p, <32 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_32_mem_mask3: +; CHECK: # BB#0: +; 
CHECK-NEXT: movl $1828018964, %eax # imm = 0x6CF55B14 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %default + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_i8_to_32_mem_mask3(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1828018964, %eax # imm = 0x6CF55B14 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <64 x i8> @test_i8_to_64_mem(i8* %p) { +; CHECK-LABEL: test_i8_to_64_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %res = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mem_mask0(i8* %p, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $468087142555171329, %rax # imm = 0x67EFAC6AFEDBA01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mem_mask0(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $468087142555171329, %rax # imm = 0x67EFAC6AFEDBA01 +; 
CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mem_mask1(i8* %p, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $1198668921668790785, %rax # imm = 0x10A287088F5E6A01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mem_mask1(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $1198668921668790785, %rax # imm = 0x10A287088F5E6A01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mem_mask2(i8* %p, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $1018395262988968961, %rax # imm = 0xE2211189365E401 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 
x i8> @test_masked_z_i8_to_64_mem_mask2(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $1018395262988968961, %rax # imm = 0xE2211189365E401 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_i8_to_64_mem_mask3(i8* %p, <64 x i8> %default) { +; CHECK-LABEL: test_masked_i8_to_64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $2983418297125630465, %rax # imm = 0x29673B226892F201 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %default + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_i8_to_64_mem_mask3(i8* %p) { +; CHECK-LABEL: test_masked_z_i8_to_64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $2983418297125630465, %rax # imm = 0x29673B226892F201 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpbroadcastb (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i8, i8* %p + %vec = insertelement <2 x i8> undef, i8 %s, i32 0 + %shuf = shufflevector <2 x i8> %vec, <2 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <8 x i16> @test_i16_to_8_mem(i16* %p) { +; CHECK-LABEL: test_i16_to_8_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %res = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> 
@test_masked_i16_to_8_mem_mask0(i16* %p, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mem_mask0(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_i16_to_8_mem_mask1(i16* %p, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mem_mask1(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> 
%res +} +define <8 x i16> @test_masked_i16_to_8_mem_mask2(i16* %p, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-128, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mem_mask2(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-128, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_i16_to_8_mem_mask3(i16* %p, <8 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %default + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_i16_to_8_mem_mask3(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> 
zeroinitializer + ret <8 x i16> %res +} +define <16 x i16> @test_i16_to_16_mem(i16* %p) { +; CHECK-LABEL: test_i16_to_16_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %res = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mem_mask0(i16* %p, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $6614, %ax # imm = 0x19D6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mem_mask0(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $6614, %ax # imm = 0x19D6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mem_mask1(i16* %p, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-772, %ax # imm = 0xFCFC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> 
@test_masked_z_i16_to_16_mem_mask1(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-772, %ax # imm = 0xFCFC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mem_mask2(i16* %p, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $13065, %ax # imm = 0x3309 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mem_mask2(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $13065, %ax # imm = 0x3309 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_i16_to_16_mem_mask3(i16* %p, <16 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $23498, %ax # imm = 0x5BCA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x 
i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %default + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_i16_to_16_mem_mask3(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $23498, %ax # imm = 0x5BCA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <32 x i16> @test_i16_to_32_mem(i16* %p) { +; CHECK-LABEL: test_i16_to_32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %res = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mem_mask0(i16* %p, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1152776498, %eax # imm = 0xBB4A06CE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mem_mask0(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1152776498, %eax # imm = 0xBB4A06CE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , 
<32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mem_mask1(i16* %p, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-694382116, %eax # imm = 0xD69C91DC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mem_mask1(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-694382116, %eax # imm = 0xD69C91DC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mem_mask2(i16* %p, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-350116879, %eax # imm = 0xEB21A3F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mem_mask2(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-350116879, %eax # imm = 0xEB21A3F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + 
%s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_i16_to_32_mem_mask3(i16* %p, <32 x i16> %default) { +; CHECK-LABEL: test_masked_i16_to_32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $727673142, %eax # imm = 0x2B5F6936 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %default + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_i16_to_32_mem_mask3(i16* %p) { +; CHECK-LABEL: test_masked_z_i16_to_32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $727673142, %eax # imm = 0x2B5F6936 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastw (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i16, i16* %p + %vec = insertelement <2 x i16> undef, i16 %s, i32 0 + %shuf = shufflevector <2 x i16> %vec, <2 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <4 x i32> @test_i32_to_4_mem(i32* %p) { +; CHECK-LABEL: test_i32_to_4_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss (%rdi), %xmm0 +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mem_mask0(i32* %p, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, 
i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mem_mask0(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mem_mask1(i32* %p, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mem_mask1(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mem_mask2(i32* %p, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = 
insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mem_mask2(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_i32_to_4_mem_mask3(i32* %p, <4 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_i32_to_4_mem_mask3(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <8 x i32> @test_i32_to_8_mem(i32* %p) { +; CHECK-LABEL: test_i32_to_8_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss (%rdi), %ymm0 +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %res = shufflevector <2 x i32> %vec, <2 x 
i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mem_mask0(i32* %p, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mem_mask0(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mem_mask1(i32* %p, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mem_mask1(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 
x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mem_mask2(i32* %p, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mem_mask2(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_i32_to_8_mem_mask3(i32* %p, <8 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_i32_to_8_mem_mask3(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> 
undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <16 x i32> @test_i32_to_16_mem(i32* %p) { +; CHECK-LABEL: test_i32_to_16_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastss (%rdi), %zmm0 +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mem_mask0(i32* %p, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $28987, %ax # imm = 0x713B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mem_mask0(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $28987, %ax # imm = 0x713B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mem_mask1(i32* %p, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $11457, %ax # imm = 0x2CC1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> 
%shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mem_mask1(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $11457, %ax # imm = 0x2CC1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mem_mask2(i32* %p, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $30908, %ax # imm = 0x78BC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mem_mask2(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $30908, %ax # imm = 0x78BC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_i32_to_16_mem_mask3(i32* %p, <16 x i32> %default) { +; CHECK-LABEL: test_masked_i32_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26863, %ax # imm = 0x68EF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x 
i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_i32_to_16_mem_mask3(i32* %p) { +; CHECK-LABEL: test_masked_z_i32_to_16_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26863, %ax # imm = 0x68EF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastd (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i32, i32* %p + %vec = insertelement <2 x i32> undef, i32 %s, i32 0 + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <2 x i64> @test_i64_to_2_mem(i64* %p) { +; CHECK-LABEL: test_i64_to_2_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + ret <2 x i64> %res +} +define <2 x i64> @test_masked_i64_to_2_mem_mask0(i64* %p, <2 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_2_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %default + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_i64_to_2_mem_mask0(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_2_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x 
i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <2 x i64> @test_masked_i64_to_2_mem_mask1(i64* %p, <2 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_2_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %default + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_i64_to_2_mem_mask1(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_2_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <4 x i64> @test_i64_to_4_mem(i64* %p) { +; CHECK-LABEL: test_i64_to_4_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd (%rdi), %ymm0 +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mem_mask0(i64* %p, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mem_mask0(i64* %p) { +; CHECK-LABEL: 
test_masked_z_i64_to_4_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mem_mask1(i64* %p, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mem_mask1(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mem_mask2(i64* %p, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> 
@test_masked_z_i64_to_4_mem_mask2(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_i64_to_4_mem_mask3(i64* %p, <4 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_i64_to_4_mem_mask3(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_4_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <8 x i64> @test_i64_to_8_mem(i64* %p) { +; CHECK-LABEL: test_i64_to_8_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd (%rdi), %zmm0 +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mem_mask0(i64* %p, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb 
$-113, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mem_mask0(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-113, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mem_mask1(i64* %p, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mem_mask1(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mem_mask2(i64* %p, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mem_mask2: +; CHECK: 
# BB#0: +; CHECK-NEXT: movb $-67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mem_mask2(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_i64_to_8_mem_mask3(i64* %p, <8 x i64> %default) { +; CHECK-LABEL: test_masked_i64_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_i64_to_8_mem_mask3(i64* %p) { +; CHECK-LABEL: test_masked_z_i64_to_8_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpbroadcastq (%rdi), %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %s = load i64, i64* %p + %vec = insertelement <2 x i64> undef, i64 %s, i32 0 + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll 
b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll new file mode 100644 index 000000000000..8a5584bc917a --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-fp.ll @@ -0,0 +1,1101 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <8 x float> @test_2xfloat_to_8xfloat(<8 x float> %vec) { +; CHECK-LABEL: test_2xfloat_to_8xfloat: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mask0(<8 x float> %vec, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mask1(<8 x float> %vec, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq 
+ %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask1(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mask2(<8 x float> %vec, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask2(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mask3(<8 x float> %vec, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd 
%ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_2xfloat_to_16xfloat(<16 x float> %vec) { +; CHECK-LABEL: test_2xfloat_to_16xfloat: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mask0(<16 x float> %vec, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $21312, %ax # imm = 0x5340 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $21312, %ax # imm = 0x5340 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> 
undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mask1(<16 x float> %vec, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8490, %ax # imm = 0xDED6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask1(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8490, %ax # imm = 0xDED6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mask2(<16 x float> %vec, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12522, %ax # imm = 0x30EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12522, %ax # imm = 
0x30EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mask3(<16 x float> %vec, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28344, %ax # imm = 0x9148 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28344, %ax # imm = 0x9148 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <4 x double> @test_2xdouble_to_4xdouble_mem(<2 x double>* %vp) { +; CHECK-LABEL: test_2xdouble_to_4xdouble_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %res = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask0(<2 x double>* %vp, <4 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask0: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask0(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask1(<2 x double>* %vp, <4 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask1(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 
x double> @test_masked_2xdouble_to_4xdouble_mem_mask2(<2 x double>* %vp, <4 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask2(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_2xdouble_to_4xdouble_mem_mask3(<2 x double>* %vp, <4 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_4xdouble_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %default + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_2xdouble_to_4xdouble_mem_mask3(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_4xdouble_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf 
= shufflevector <2 x double> %vec, <2 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <8 x double> @test_2xdouble_to_8xdouble_mem(<2 x double>* %vp) { +; CHECK-LABEL: test_2xdouble_to_8xdouble_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %res = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask0(<2 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $21, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask0(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $21, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask1(<2 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: 
retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask1(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask2(<2 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask2(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_2xdouble_to_8xdouble_mem_mask3(<2 x double>* %vp, <8 x double> %default) { +; 
CHECK-LABEL: test_masked_2xdouble_to_8xdouble_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-19, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_2xdouble_to_8xdouble_mem_mask3(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_to_8xdouble_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-19, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_4xdouble_to_8xdouble_mem(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_to_8xdouble_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask0(<4 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $28, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask0(<4 x 
double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $28, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask1(<4 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask1(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask2(<4 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-76, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector 
<4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask2(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-76, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_4xdouble_to_8xdouble_mem_mask3(<4 x double>* %vp, <8 x double> %default) { +; CHECK-LABEL: test_masked_4xdouble_to_8xdouble_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %default + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_4xdouble_to_8xdouble_mem_mask3(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_to_8xdouble_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x float> @test_2xfloat_to_8xfloat_mem(<2 x float>* %vp) { +; CHECK-LABEL: test_2xfloat_to_8xfloat_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; 
CHECK-NEXT: vbroadcastsd %xmm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %res = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask0(<2 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movb $-49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask0(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movb $-49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask1(<2 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movb $-118, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> 
@test_masked_z_2xfloat_to_8xfloat_mem_mask1(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movb $-118, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask2(<2 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movb $-11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask2(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movb $-11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_2xfloat_to_8xfloat_mem_mask3(<2 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_8xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: 
movb $-102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_2xfloat_to_8xfloat_mem_mask3(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_8xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movb $-102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_2xfloat_to_16xfloat_mem(<2 x float>* %vp) { +; CHECK-LABEL: test_2xfloat_to_16xfloat_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %res = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask0(<2 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movw $-27027, %ax # imm = 0x966D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x 
float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask0(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movw $-27027, %ax # imm = 0x966D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask1(<2 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movw $29162, %ax # imm = 0x71EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask1(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movw $29162, %ax # imm = 0x71EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask2(<2 x float>* %vp, <16 x 
float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movw $-26458, %ax # imm = 0x98A6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask2(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movw $-26458, %ax # imm = 0x98A6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_2xfloat_to_16xfloat_mem_mask3(<2 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_2xfloat_to_16xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero +; CHECK-NEXT: movw $25225, %ax # imm = 0x6289 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} = xmm1[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_2xfloat_to_16xfloat_mem_mask3(<2 x float>* %vp) { +; CHECK-LABEL: test_masked_z_2xfloat_to_16xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: 
vmovsd {{.*#+}} xmm0 = mem[0],zero +; CHECK-NEXT: movw $25225, %ax # imm = 0x6289 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x float>, <2 x float>* %vp + %shuf = shufflevector <2 x float> %vec, <2 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <8 x float> @test_4xfloat_to_8xfloat_mem(<4 x float>* %vp) { +; CHECK-LABEL: test_4xfloat_to_8xfloat_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf128 {{.*#+}} ymm0 = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %res = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask0(<4 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-109, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask0(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-109, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask1(<4 x float>* %vp, <8 x float> %default) { +; 
CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $74, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask1(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $74, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask2(<4 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask2(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 
x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_4xfloat_to_8xfloat_mem_mask3(<4 x float>* %vp, <8 x float> %default) { +; CHECK-LABEL: test_masked_4xfloat_to_8xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $48, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %default + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_4xfloat_to_8xfloat_mem_mask3(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_8xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $48, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_4xfloat_to_16xfloat_mem(<4 x float>* %vp) { +; CHECK-LABEL: test_4xfloat_to_16xfloat_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %res = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask0(<4 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-25378, %ax # imm = 0x9CDE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> 
+ %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask0(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-25378, %ax # imm = 0x9CDE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask1(<4 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-22502, %ax # imm = 0xA81A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask1(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-22502, %ax # imm = 0xA81A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask2(<4 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: 
test_masked_4xfloat_to_16xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31229, %ax # imm = 0x79FD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask2(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31229, %ax # imm = 0x79FD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_4xfloat_to_16xfloat_mem_mask3(<4 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_4xfloat_to_16xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5887, %ax # imm = 0x16FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_4xfloat_to_16xfloat_mem_mask3(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_to_16xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5887, %ax # imm = 0x16FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 
x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_8xfloat_to_16xfloat_mem(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_to_16xfloat_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcastf64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask0(<8 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-15887, %ax # imm = 0xC1F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-15887, %ax # imm = 0xC1F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask1(<8 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8077, %ax # imm = 0xE073 +; CHECK-NEXT: kmovd %eax, %k1 +; 
CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask1(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8077, %ax # imm = 0xE073 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask2(<8 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5023, %ax # imm = 0xEC61 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask2(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5023, %ax # imm = 0xEC61 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> 
%shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_8xfloat_to_16xfloat_mem_mask3(<8 x float>* %vp, <16 x float> %default) { +; CHECK-LABEL: test_masked_8xfloat_to_16xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-10326, %ax # imm = 0xD7AA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %default + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_8xfloat_to_16xfloat_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_16xfloat_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-10326, %ax # imm = 0xD7AA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcastf32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll new file mode 100644 index 000000000000..0b0dfa878fcd --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/broadcast-vector-int.ll @@ -0,0 +1,1343 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +; FIXME: fixing PR34394 should fix the i32x2 memory cases resulting in a simple vbroadcasti32x2 instruction. 
+ +define <4 x i32> @test_2xi32_to_4xi32(<4 x i32> %vec) { +; CHECK-LABEL: test_2xi32_to_4xi32: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastq %xmm0, %xmm0 +; CHECK-NEXT: retq + %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mask0(<4 x i32> %vec, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask0(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mask1(<4 x i32> %vec, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask1(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x 
i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mask2(<4 x i32> %vec, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask2(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mask3(<4 x i32> %vec, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm1 {%k1} +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mask3(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <8 x i32> @test_2xi32_to_8xi32(<8 x i32> %vec) { +; 
CHECK-LABEL: test_2xi32_to_8xi32: +; CHECK: # BB#0: +; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mask0(<8 x i32> %vec, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $92, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask0(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $92, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mask1(<8 x i32> %vec, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-15, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask1(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-15, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = 
shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mask2(<8 x i32> %vec, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask2(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mask3(<8 x i32> %vec, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm1 {%k1} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mask3(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + 
%res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <16 x i32> @test_2xi32_to_16xi32(<16 x i32> %vec) { +; CHECK-LABEL: test_2xi32_to_16xi32: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mask0(<16 x i32> %vec, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-18638, %ax # imm = 0xB732 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-18638, %ax # imm = 0xB732 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mask1(<16 x i32> %vec, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $25429, %ax # imm = 0x6355 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + 
ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask1(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $25429, %ax # imm = 0x6355 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mask2(<16 x i32> %vec, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27159, %ax # imm = 0x6A17 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask2(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27159, %ax # imm = 0x6A17 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mask3(<16 x i32> %vec, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-22884, %ax # imm = 0xA69C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm1 {%k1} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = 
shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-22884, %ax # imm = 0xA69C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} zmm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1,0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <4 x i32> @test_2xi32_to_4xi32_mem(<2 x i32>* %vp) { +; CHECK-LABEL: test_2xi32_to_4xi32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask0(<2 x i32>* %vp, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = xmm1[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask0(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 
x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask1(<2 x i32>* %vp, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = xmm1[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask1(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask2(<2 x i32>* %vp, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = xmm1[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask2(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask2: +; 
CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_2xi32_to_4xi32_mem_mask3(<2 x i32>* %vp, <4 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_4xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = xmm1[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %default + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_2xi32_to_4xi32_mem_mask3(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_4xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <8 x i32> @test_2xi32_to_8xi32_mem(<2 x i32>* %vp) { +; CHECK-LABEL: test_2xi32_to_8xi32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> 
@test_masked_2xi32_to_8xi32_mem_mask0(<2 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; CHECK-NEXT: movb $-94, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask0(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-NEXT: movb $-94, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask1(<2 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; CHECK-NEXT: movb $97, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask1(<2 x i32>* %vp) { +; CHECK-LABEL: 
test_masked_z_2xi32_to_8xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-NEXT: movb $97, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask2(<2 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; CHECK-NEXT: movb $-33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask2(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-NEXT: movb $-33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_2xi32_to_8xi32_mem_mask3(<2 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_8xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq 
{{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3] +; CHECK-NEXT: movb $-111, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} = xmm1[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_2xi32_to_8xi32_mem_mask3(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_8xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3] +; CHECK-NEXT: movb $-111, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x2 {{.*#+}} ymm0 {%k1} {z} = xmm0[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <16 x i32> @test_2xi32_to_16xi32_mem(<2 x i32>* %vp) { +; CHECK-LABEL: test_2xi32_to_16xi32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %res = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask0(<2 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $27158, %ax # imm = 0x6A16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = 
load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask0(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $27158, %ax # imm = 0x6A16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask1(<2 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $26363, %ax # imm = 0x66FB +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask1(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $26363, %ax # imm = 0x66FB +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = 
shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask2(<2 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $-19542, %ax # imm = 0xB3AA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask2(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $-19542, %ax # imm = 0xB3AA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_2xi32_to_16xi32_mem_mask3(<2 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_2xi32_to_16xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $27409, %ax # imm = 0x6B11 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm1, %zmm2, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, 
<2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_2xi32_to_16xi32_mem_mask3(<2 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_2xi32_to_16xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,0,2,0,2,0,2,0,2,0,2,0,2,0,2] +; CHECK-NEXT: movw $27409, %ax # imm = 0x6B11 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <2 x i32>, <2 x i32>* %vp + %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <8 x i32> @test_4xi32_to_8xi32_mem(<4 x i32>* %vp) { +; CHECK-LABEL: test_4xi32_to_8xi32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask0(<4 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask0(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> 
%vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask1(<4 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask1(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask2(<4 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask2(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x 
i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_4xi32_to_8xi32_mem_mask3(<4 x i32>* %vp, <8 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_8xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $66, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %default + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_4xi32_to_8xi32_mem_mask3(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_8xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $66, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <16 x i32> @test_4xi32_to_16xi32_mem(<4 x i32>* %vp) { +; CHECK-LABEL: test_4xi32_to_16xi32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask0(<4 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $10334, %ax # imm = 0x285E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> 
%vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask0(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $10334, %ax # imm = 0x285E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask1(<4 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30962, %ax # imm = 0x870E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask1(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30962, %ax # imm = 0x870E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask2(<4 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31933, %ax # imm = 
0x7CBD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask2(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31933, %ax # imm = 0x7CBD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_4xi32_to_16xi32_mem_mask3(<4 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_4xi32_to_16xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28744, %ax # imm = 0x8FB8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_4xi32_to_16xi32_mem_mask3(<4 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_4xi32_to_16xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28744, %ax # imm = 0x8FB8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i32>, <4 x i32>* %vp + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x 
i32> %res +} +define <4 x i64> @test_2xi64_to_4xi64_mem(<2 x i64>* %vp) { +; CHECK-LABEL: test_2xi64_to_4xi64_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti128 {{.*#+}} ymm0 = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask0(<2 x i64>* %vp, <4 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask0(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask1(<2 x i64>* %vp, <4 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask1(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask1: +; 
CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask2(<2 x i64>* %vp, <4 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask2(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_4xi64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_2xi64_to_4xi64_mem_mask3(<2 x i64>* %vp, <4 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_4xi64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %default + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_2xi64_to_4xi64_mem_mask3(<2 x i64>* %vp) { +; CHECK-LABEL: 
test_masked_z_2xi64_to_4xi64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} ymm0 {%k1} {z} = mem[0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <8 x i64> @test_2xi64_to_8xi64_mem(<2 x i64>* %vp) { +; CHECK-LABEL: test_2xi64_to_8xi64_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti32x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %res = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask0(<2 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $119, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask0(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $119, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask1(<2 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-50, %al +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask1(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-50, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask2(<2 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask2(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_2xi64_to_8xi64_mem_mask3(<2 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_2xi64_to_8xi64_mem_mask3: +; 
CHECK: # BB#0: +; CHECK-NEXT: movb $-49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_2xi64_to_8xi64_mem_mask3(<2 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_2xi64_to_8xi64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x2 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,0,1,0,1,0,1] +; CHECK-NEXT: retq + %vec = load <2 x i64>, <2 x i64>* %vp + %shuf = shufflevector <2 x i64> %vec, <2 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <16 x i32> @test_8xi32_to_16xi32_mem(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_to_16xi32_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask0(<8 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12321, %ax # imm = 0x3021 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask0(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12321, %ax # imm = 0x3021 +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask1(<8 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-39, %ax +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask1(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-39, %ax +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask2(<8 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-24047, %ax # imm = 0xA211 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> 
@test_masked_z_8xi32_to_16xi32_mem_mask2(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-24047, %ax # imm = 0xA211 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_8xi32_to_16xi32_mem_mask3(<8 x i32>* %vp, <16 x i32> %default) { +; CHECK-LABEL: test_masked_8xi32_to_16xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5470, %ax # imm = 0x155E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %default + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_8xi32_to_16xi32_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_16xi32_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5470, %ax # imm = 0x155E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti32x8 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,5,6,7,0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <8 x i64> @test_4xi64_to_8xi64_mem(<4 x i64>* %vp) { +; CHECK-LABEL: test_4xi64_to_8xi64_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %res = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> 
@test_masked_4xi64_to_8xi64_mem_mask0(<4 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-71, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask0(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-71, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask1(<4 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask1(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> 
zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask2(<4 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $103, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask2(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $103, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_4xi64_to_8xi64_mem_mask3(<4 x i64>* %vp, <8 x i64> %default) { +; CHECK-LABEL: test_masked_4xi64_to_8xi64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-83, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %default + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_4xi64_to_8xi64_mem_mask3(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_to_8xi64_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-83, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <8 x i32> 
+ %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll b/llvm/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll new file mode 100644 index 000000000000..d94db336274f --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/duplicate-high.ll @@ -0,0 +1,789 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <4 x float> @test_4xfloat_dup_high(<4 x float> %vec) { +; CHECK-LABEL: test_4xfloat_dup_high: +; CHECK: # BB#0: +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mask0(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mask0(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mask1(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: 
vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mask1(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mask2(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mask2(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mask3(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3] +; CHECK-NEXT: 
vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mask3(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mask4(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm1 {%k1} = xmm0[1,1,3,3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mask4(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_dup_high_mem(<4 x float>* %vp) { +; CHECK-LABEL: test_4xfloat_dup_high_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x 
float> @test_masked_4xfloat_dup_high_mem_mask0(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask0(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mem_mask1(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask1(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> 
%shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mem_mask2(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask2(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mem_mask3(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask3(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> 
%vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_high_mem_mask4(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_high_mem_mask4(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} xmm0 {%k1} {z} = mem[1,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <8 x float> @test_8xfloat_dup_high(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_dup_high: +; CHECK: # BB#0: +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mask0(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-106, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x 
float> @test_masked_z_8xfloat_dup_high_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-106, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mask1(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mask1(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mask2(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> 
@test_masked_z_8xfloat_dup_high_mask2(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mask3(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mask4(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-109, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm1 {%k1} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> 
@test_masked_z_8xfloat_dup_high_mask4(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-109, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_dup_high_mem(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_dup_high_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mem_mask0(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $74, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $74, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mem_mask1(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: 
test_masked_8xfloat_dup_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask1(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mem_mask2(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $48, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask2(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $48, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define 
<8 x float> @test_masked_8xfloat_dup_high_mem_mask3(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-100, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-100, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_high_mem_mask4(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_high_mem_mask4(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} ymm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x 
i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_16xfloat_dup_high(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_dup_high: +; CHECK: # BB#0: +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mask0(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31229, %ax # imm = 0x79FD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $31229, %ax # imm = 0x79FD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mask1(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5887, %ax # imm = 0x16FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res 
= select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mask1(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5887, %ax # imm = 0x16FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mask2(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-15887, %ax # imm = 0xC1F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-15887, %ax # imm = 0xC1F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mask3(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8077, %ax # imm = 0xE073 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = 
zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8077, %ax # imm = 0xE073 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mask4(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5023, %ax # imm = 0xEC61 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm1 {%k1} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mask4(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5023, %ax # imm = 0xEC61 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_dup_high_mem(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_dup_high_mem: 
+; CHECK: # BB#0: +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mem_mask0(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-10326, %ax # imm = 0xD7AA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-10326, %ax # imm = 0xD7AA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mem_mask1(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6675, %ax # imm = 0xE5ED +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x 
float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask1(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6675, %ax # imm = 0xE5ED +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mem_mask2(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5042, %ax # imm = 0xEC4E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask2(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5042, %ax # imm = 0xEC4E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mem_mask3(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30108, %ax # imm = 0x8A64 +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30108, %ax # imm = 0x8A64 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_high_mem_mask4(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $25644, %ax # imm = 0x642C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_high_mem_mask4(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $25644, %ax # imm = 0x642C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x 
i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll b/llvm/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll new file mode 100644 index 000000000000..8da48ea5ada7 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/duplicate-low.ll @@ -0,0 +1,1428 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <2 x double> @test_2xdouble_dup_low(<2 x double> %vec) { +; CHECK-LABEL: test_2xdouble_dup_low: +; CHECK: # BB#0: +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] +; CHECK-NEXT: retq + %res = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_dup_low_mask0(<2 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm1 {%k1} = xmm0[0,0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_dup_low_mask0(<2 x double> %vec) { +; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_dup_low_mask1(<2 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup 
{{.*#+}} xmm1 {%k1} = xmm0[0,0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_dup_low_mask1(<2 x double> %vec) { +; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_dup_low_mem(<2 x double>* %vp) { +; CHECK-LABEL: test_2xdouble_dup_low_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = mem[0,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %res = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_dup_low_mem_mask0(<2 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_dup_low_mem_mask0(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, 
<2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_dup_low_mem_mask1(<2 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} = mem[0,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_dup_low_mem_mask1(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 {%k1} {z} = mem[0,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <4 x double> @test_4xdouble_dup_low(<4 x double> %vec) { +; CHECK-LABEL: test_4xdouble_dup_low: +; CHECK: # BB#0: +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 = ymm0[0,0,2,2] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mask0(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> 
@test_masked_z_4xdouble_dup_low_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mask1(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mask1(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mask2(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> 
@test_masked_z_4xdouble_dup_low_mask2(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mask3(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mask3(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mask4(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> 
@test_masked_z_4xdouble_dup_low_mask4(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_dup_low_mem(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_dup_low_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mem_mask0(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mem_mask1(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: 
test_masked_4xdouble_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask1(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mem_mask2(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask2(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 
x double> @test_masked_4xdouble_dup_low_mem_mask3(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask3(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_dup_low_mem_mask4(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_dup_low_mem_mask4(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = 
select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <8 x double> @test_8xdouble_dup_low(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_dup_low: +; CHECK: # BB#0: +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mask0(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mask1(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mask1(<8 x double> %vec) { 
+; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mask2(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-24, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mask2(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-24, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mask3(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mask3(<8 x double> %vec) 
{ +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mask4(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-50, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mask4(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-50, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_dup_low_mem(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_dup_low_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mem_mask0(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; 
CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mem_mask1(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $79, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask1(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $79, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mem_mask2(<8 x double>* %vp, <8 x double> 
%vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-70, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask2(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-70, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mem_mask3(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-27, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-27, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x 
double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_dup_low_mem_mask4(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_dup_low_mem_mask4(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <4 x float> @test_4xfloat_dup_low(<4 x float> %vec) { +; CHECK-LABEL: test_4xfloat_dup_low: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mask0(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mask0(<4 x float> %vec) { +; 
CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mask1(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mask1(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mask2(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mask2(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask2: +; CHECK: # BB#0: 
+; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mask3(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mask3(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mask4(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm1 {%k1} = xmm0[0,0,2,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mask4(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; 
CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = xmm0[0,0,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_dup_low_mem(<4 x float>* %vp) { +; CHECK-LABEL: test_4xfloat_dup_low_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mem_mask0(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask0(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mem_mask1(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = 
shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask1(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mem_mask2(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask2(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mem_mask3(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2] +; 
CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask3(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_dup_low_mem_mask4(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_dup_low_mem_mask4(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} xmm0 {%k1} {z} = mem[0,0,2,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <8 x float> @test_8xfloat_dup_low(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_dup_low: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x 
float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mask0(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mask1(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mask1(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> 
zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mask2(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mask2(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mask3(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x 
float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mask4(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm1 {%k1} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mask4(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_dup_low_mem(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_dup_low_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mem_mask0(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: 
test_masked_z_8xfloat_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mem_mask1(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask1(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mem_mask2(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} 
+ +define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask2(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mem_mask3(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_dup_low_mem_mask4(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> 
undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_dup_low_mem_mask4(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <16 x float> @test_16xfloat_dup_low(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_dup_low: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mask0(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $21312, %ax # imm = 0x5340 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $21312, %ax # imm = 0x5340 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x 
float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mask1(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8490, %ax # imm = 0xDED6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mask1(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8490, %ax # imm = 0xDED6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mask2(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12522, %ax # imm = 0x30EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12522, %ax # imm = 0x30EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = 
zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mask3(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28344, %ax # imm = 0x9148 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28344, %ax # imm = 0x9148 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mask4(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $15638, %ax # imm = 0x3D16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mask4(<16 x float> %vec) { +; CHECK-LABEL: 
test_masked_z_16xfloat_dup_low_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $15638, %ax # imm = 0x3D16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_dup_low_mem(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_dup_low_mem: +; CHECK: # BB#0: +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mem_mask0(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-2129, %ax # imm = 0xF7AF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-2129, %ax # imm = 0xF7AF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> 
@test_masked_16xfloat_dup_low_mem_mask1(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12900, %ax # imm = 0xCD9C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask1(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12900, %ax # imm = 0xCD9C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mem_mask2(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $29358, %ax # imm = 0x72AE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask2(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $29358, %ax # imm = 0x72AE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} 
{z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mem_mask3(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5272, %ax # imm = 0x1498 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $5272, %ax # imm = 0x1498 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_dup_low_mem_mask4(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $20975, %ax # imm = 0x51EF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + 
ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_dup_low_mem_mask4(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_dup_low_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $20975, %ax # imm = 0x51EF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = mem[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll new file mode 100644 index 000000000000..8803ba9c7a8c --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/in_lane_permute.ll @@ -0,0 +1,1756 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +; FIXME: The non immediate <16 x float> test cases should be fixed by PR34382 + +define <4 x float> @test_4xfloat_perm_mask0(<4 x float> %vec) { +; CHECK-LABEL: test_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,1,3,1] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_perm_mask0(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[2,1,3,1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mask0(<4 x float> %vec) { +; CHECK-LABEL: 
test_masked_z_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1,3,1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_perm_mask1(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[1,2,3,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mask1(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_perm_mask2(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[1,3,2,1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mask2(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3,2,1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_perm_mask3(<4 x float> %vec) { +; CHECK-LABEL: test_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[1,2,3,2] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_perm_mask3(<4 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[1,2,3,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mask3(<4 x float> %vec) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2,3,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_perm_mem_mask0(<4 x float>* %vp) { +; CHECK-LABEL: test_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[3,3,1,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_perm_mem_mask0(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: 
test_masked_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[3,3,1,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mem_mask0(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[3,3,1,3] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_masked_4xfloat_perm_mem_mask1(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[1,3,2,0] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mem_mask1(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[1,3,2,0] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_masked_4xfloat_perm_mem_mask2(<4 x float>* 
%vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[2,1,3,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mem_mask2(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[2,1,3,2] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_perm_mem_mask3(<4 x float>* %vp) { +; CHECK-LABEL: test_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = mem[0,1,3,0] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %res = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_4xfloat_perm_mem_mask3(<4 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = mem[0,1,3,0] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_4xfloat_perm_mem_mask3(<4 x float>* %vp) { +; CHECK-LABEL: test_masked_z_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = mem[0,1,3,0] +; CHECK-NEXT: retq + %vec = load <4 x float>, <4 x float>* %vp + %shuf = shufflevector <4 x float> %vec, <4 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,6,6,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $83, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,4,6,6,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $83, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,4,6,6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_imm_mask1(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-34, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,7,6,7,6] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 
x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mask1(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-34, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,7,6,7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[2,1,2,1,6,5,4,4] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,2,1,6,5,4,4] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_perm_imm_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[2,2,1,0,6,6,5,4] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_imm_mask3(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: 
test_masked_8xfloat_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-111, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[2,2,1,0,6,6,5,4] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-111, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2,1,0,6,6,5,4] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask4(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $61, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,3,7,7,6,5] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask4(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $61, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,3,7,7,6,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_imm_mask5(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask5: 
+; CHECK: # BB#0: +; CHECK-NEXT: movb $-10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,3,6,5,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mask5(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,3,6,5,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_perm_mask6(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,3,2,5,6,7,7] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask6(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-51, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,5,6,7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask6(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-51, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,5,6,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x 
float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_imm_mask7(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 {%k1} = ymm0[3,0,2,1,7,4,6,5] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mask7(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,2,1,7,4,6,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,0,0,2,4,6,7,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: movb $-95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = ymm1[3,0,0,2,4,6,7,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , 
<8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: movb $-95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,0,2,4,6,7,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask1(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[2,0,2,2,6,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask1(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[2,0,2,2,6,4,6,6] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = 
ymm1[2,1,1,3,4,4,7,4] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,1,3,4,4,7,4] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = mem[0,0,3,3,4,4,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-70, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[0,0,3,3,4,4,7,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-70, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[0,0,3,3,4,4,7,7] +; 
CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_8xfloat_perm_mem_mask4(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: movb $30, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = ymm1[0,1,0,1,4,6,5,4] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask4(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: movb $30, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,0,1,4,6,5,4] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_8xfloat_perm_imm_mem_mask5(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $56, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[2,0,0,3,6,4,4,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask5(<8 x float>* %vp) { +; CHECK-LABEL: 
test_masked_z_8xfloat_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $56, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[2,0,0,3,6,4,4,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_perm_mem_mask6(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,1,2,3,7,4,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mem_mask6(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: movb $-54, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = ymm1[0,1,2,3,7,4,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask6(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: movb $-54, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,7,4,6,7] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> 
@test_masked_8xfloat_perm_imm_mem_mask7(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $85, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} = mem[0,2,3,1,4,6,7,5] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_imm_mem_mask7(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $85, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 {%k1} {z} = mem[0,2,3,1,4,6,7,5] +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [1,1,3,1,6,4,6,5,8,9,8,11,13,13,13,15] +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [1,1,3,1,6,4,6,5,8,9,8,11,13,13,13,15] +; CHECK-NEXT: movw $16429, %ax # imm = 0x402D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + 
+define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [1,1,3,1,6,4,6,5,8,9,8,11,13,13,13,15] +; CHECK-NEXT: movw $16429, %ax # imm = 0x402D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_imm_mask1(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-26425, %ax # imm = 0x98C7 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[2,2,2,1,6,6,6,5,10,10,10,9,14,14,14,13] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mask1(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-26425, %ax # imm = 0x98C7 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,2,2,1,6,6,6,5,10,10,10,9,14,14,14,13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [1,2,0,0,5,4,6,5,11,10,9,9,14,13,14,12] +; CHECK-NEXT: movw $28987, %ax # imm = 0x713B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, 
%zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [1,2,0,0,5,4,6,5,11,10,9,9,14,13,14,12] +; CHECK-NEXT: movw $28987, %ax # imm = 0x713B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_perm_imm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = zmm0[1,1,0,2,5,5,4,6,9,9,8,10,13,13,12,14] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_imm_mask3(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $11457, %ax # imm = 0x2CC1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,1,0,2,5,5,4,6,9,9,8,10,13,13,12,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $11457, %ax # imm = 0x2CC1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = 
zmm0[1,1,0,2,5,5,4,6,9,9,8,10,13,13,12,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask4(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [1,2,3,3,5,5,5,7,11,11,8,11,14,12,14,15] +; CHECK-NEXT: movw $30908, %ax # imm = 0x78BC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mask4(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [1,2,3,3,5,5,5,7,11,11,8,11,14,12,14,15] +; CHECK-NEXT: movw $30908, %ax # imm = 0x78BC +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_imm_mask5(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26863, %ax # imm = 0x68EF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[1,2,1,0,5,6,5,4,9,10,9,8,13,14,13,12] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> 
@test_masked_z_16xfloat_perm_imm_mask5(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26863, %ax # imm = 0x68EF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2,1,0,5,6,5,4,9,10,9,8,13,14,13,12] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_perm_mask6(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [2,0,3,2,4,4,6,7,9,11,8,11,13,12,13,13] +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask6(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [2,0,3,2,4,4,6,7,9,11,8,11,13,12,13,13] +; CHECK-NEXT: movw $-28239, %ax # imm = 0x91B1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mask6(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [2,0,3,2,4,4,6,7,9,11,8,11,13,12,13,13] +; CHECK-NEXT: movw $-28239, %ax # imm = 0x91B1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + 
ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_imm_mask7(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-32205, %ax # imm = 0x8233 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm1 {%k1} = zmm0[3,3,0,2,7,7,4,6,11,11,8,10,15,15,12,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mask7(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-32205, %ax # imm = 0x8233 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,0,2,7,7,4,6,11,11,8,10,15,15,12,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [3,3,3,0,6,6,6,6,11,10,9,10,12,14,12,12] +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [3,3,3,0,6,6,6,6,11,10,9,10,12,14,12,12] +; CHECK-NEXT: movw $-22887, %ax # imm = 0xA699 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = 
shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [3,3,3,0,6,6,6,6,11,10,9,10,12,14,12,12] +; CHECK-NEXT: movw $-22887, %ax # imm = 0xA699 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask1(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $22744, %ax # imm = 0x58D8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[1,3,2,1,5,7,6,5,9,11,10,9,13,15,14,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask1(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $22744, %ax # imm = 0x58D8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[1,3,2,1,5,7,6,5,9,11,10,9,13,15,14,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x 
float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [2,0,0,3,5,5,6,5,9,8,8,8,14,12,13,13] +; CHECK-NEXT: movw $-8399, %ax # imm = 0xDF31 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [2,0,0,3,5,5,6,5,9,8,8,8,14,12,13,13] +; CHECK-NEXT: movw $-8399, %ax # imm = 0xDF31 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 = mem[1,0,3,1,5,4,7,5,9,8,11,9,13,12,15,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18246, %ax # imm = 0x4746 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[1,0,3,1,5,4,7,5,9,8,11,9,13,12,15,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> 
%vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18246, %ax # imm = 0x4746 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[1,0,3,1,5,4,7,5,9,8,11,9,13,12,15,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_mem_mask4(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [3,3,1,1,6,5,5,6,11,11,10,9,15,14,12,12] +; CHECK-NEXT: movw $1218, %ax # imm = 0x4C2 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask4(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [3,3,1,1,6,5,5,6,11,11,10,9,15,14,12,12] +; CHECK-NEXT: movw $1218, %ax # imm = 0x4C2 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask5(<16 
x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $2665, %ax # imm = 0xA69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[2,0,0,1,6,4,4,5,10,8,8,9,14,12,12,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask5(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $2665, %ax # imm = 0xA69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[2,0,0,1,6,4,4,5,10,8,8,9,14,12,12,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_perm_mem_mask6(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [2,1,1,2,6,5,5,7,9,11,9,9,12,15,14,15] +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mem_mask6(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [2,1,1,2,6,5,5,7,9,11,9,9,12,15,14,15] +; CHECK-NEXT: movw $-20907, %ax # imm = 0xAE55 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, 
<16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask6(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [2,1,1,2,6,5,5,7,9,11,9,9,12,15,14,15] +; CHECK-NEXT: movw $-20907, %ax # imm = 0xAE55 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_imm_mem_mask7(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28944, %ax # imm = 0x8EF0 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} = mem[1,2,0,1,5,6,4,5,9,10,8,9,13,14,12,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_imm_mem_mask7(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28944, %ax # imm = 0x8EF0 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} zmm0 {%k1} {z} = mem[1,2,0,1,5,6,4,5,9,10,8,9,13,14,12,13] +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <2 x double> @test_2xdouble_perm_mask0(<2 x double> %vec) { +; CHECK-LABEL: 
test_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = xmm0[1,0] +; CHECK-NEXT: retq + %res = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_perm_mask0(<2 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 {%k1} = xmm0[1,0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_perm_mask0(<2 x double> %vec) { +; CHECK-LABEL: test_masked_z_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_perm_mask1(<2 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm1 {%k1} = xmm0[1,0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_perm_mask1(<2 x double> %vec) { +; CHECK-LABEL: test_masked_z_2xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x 
double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_perm_mem_mask0(<2 x double>* %vp) { +; CHECK-LABEL: test_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 = mem[1,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %res = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_2xdouble_perm_mem_mask0(<2 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} = mem[1,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_perm_mem_mask0(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = mem[1,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <2 x double> @test_masked_2xdouble_perm_mem_mask1(<2 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_2xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} = mem[1,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> 
%shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_2xdouble_perm_mem_mask1(<2 x double>* %vp) { +; CHECK-LABEL: test_masked_z_2xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} xmm0 {%k1} {z} = mem[1,0] +; CHECK-NEXT: retq + %vec = load <2 x double>, <2 x double>* %vp + %shuf = shufflevector <2 x double> %vec, <2 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,3] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[1,0,2,3] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,0,2,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask1: +; CHECK: 
# BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[1,1,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[0,1,3,3] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) { +; CHECK-LABEL: test_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,2] +; CHECK-NEXT: retq + 
%res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm1 {%k1} = ymm0[1,1,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,1,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = mem[0,1,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[0,1,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> 
@test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[0,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[0,1,3,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[1,0,3,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , 
<4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,3,3] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 = mem[1,0,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} = mem[1,0,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define 
<8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,0,3,2,4,5,7,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,0,3,2,4,5,7,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,3,2,4,5,7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask1(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-39, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,4,7,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask1(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-39, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,4,7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-53, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,0,2,3,5,5,6,7] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-53, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,3,5,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_perm_mask3(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = zmm0[0,1,2,2,4,4,6,7] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask3(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,2,4,4,6,7] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x 
double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask3(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,2,4,4,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = mem[0,1,2,3,5,4,7,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,5,4,7,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,5,4,7,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret 
<8 x double> %res +} + +define <8 x double> @test_masked_8xdouble_perm_mem_mask1(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $27, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[0,1,3,3,4,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask1(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $27, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[0,1,3,3,4,5,7,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,5,4,7,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,5,4,7,6] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 
x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_perm_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 = mem[1,0,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mem_mask3(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} = mem[1,0,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilpd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll new file mode 100644 index 000000000000..01e795a3c4f9 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/partial_permute.ll @@ -0,0 +1,4556 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +; FIXME: All 
cases here should be fixed by PR34380 + +define <8 x i16> @test_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_to_8xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,12,13,12,13,8,9,14,15,10,11,12,13,14,15] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3] +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4],xmm0[5,6,7] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[8,9,12,13,12,13,8,9,14,15,10,11,12,13,14,15] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3] +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3,4],xmm0[5,6,7] +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask0(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[8,9,12,13,12,13,8,9,14,15,10,11,12,13,14,15] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,0,3] +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,4] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2],xmm1[3,4],xmm0[5,6,7] +; CHECK-NEXT: movb $-41, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0,1,8,9,2,3,10,11,12,13,14,15,8,9,12,13] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2],xmm0[3],xmm2[4,5,6,7] +; CHECK-NEXT: movb $-63, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask1(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,8,9,2,3,10,11,12,13,14,15,8,9,12,13] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,2,0,4,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3],xmm1[4,5,6,7] +; CHECK-NEXT: movb $-63, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: 
test_masked_16xi16_to_8xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[12,13,6,7,12,13,4,5,0,1,2,3,12,13,2,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3] +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4,5,6],xmm2[7] +; CHECK-NEXT: movb $107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask2(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[12,13,6,7,12,13,4,5,0,1,2,3,12,13,2,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,0,3] +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6],xmm1[7] +; CHECK-NEXT: movb $107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_to_8xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,2,3,14,15,14,15,8,9,10,11,0,1,0,1] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,14,15,12,13,10,11,8,9,8,9,0,1,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4],xmm0[5,6],xmm1[7] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = 
shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm0[0,1,2,3,14,15,14,15,8,9,10,11,0,1,0,1] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,14,15,12,13,10,11,8,9,8,9,0,1,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3],xmm2[4],xmm0[5,6],xmm2[7] +; CHECK-NEXT: movb $66, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mask3(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,2,3,14,15,14,15,8,9,10,11,0,1,0,1] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12,13,14,15,12,13,10,11,8,9,8,9,0,1,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3],xmm1[4],xmm0[5,6],xmm1[7] +; CHECK-NEXT: movb $66, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_to_8xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,14,15,12,13,6,7,10,11,10,11,6,7,6,7] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = 
xmm0[2,1,2,0] +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6],xmm0[7] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[0,1,14,15,12,13,6,7,10,11,10,11,6,7,6,7] +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,1,2,0] +; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,1,3,4,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1],xmm1[2],xmm2[3,4],xmm1[5],xmm2[6],xmm1[7] +; CHECK-NEXT: movb $-73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask0(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,14,15,12,13,6,7,10,11,10,11,6,7,6,7] +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,1,2,0] +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,1,1,3,4,5,6,7] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2],xmm1[3,4],xmm0[5],xmm1[6],xmm0[7] +; CHECK-NEXT: movb $-73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = 
shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask1(<16 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[8,9,14,15,8,9,14,15,0,1,2,3,0,1,12,13] +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,7,14,15,4,5,14,15,2,3,10,11,0,1,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3,4,5],xmm2[6,7] +; CHECK-NEXT: movb $102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask1(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[8,9,14,15,8,9,14,15,0,1,2,3,0,1,12,13] +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[6,7,14,15,4,5,14,15,2,3,10,11,0,1,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3,4,5],xmm1[6,7] +; CHECK-NEXT: movb $102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask2(<16 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: 
test_masked_16xi16_to_8xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vpsrld $16, %xmm1, %xmm2 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,0,1,6,7,0,1,10,11,0,1,14,15,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3,4,5,6,7] +; CHECK-NEXT: movb $-46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask2(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vpsrld $16, %xmm0, %xmm1 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,0,1,6,7,0,1,10,11,0,1,14,15,2,3] +; CHECK-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3,4,5,6,7] +; CHECK-NEXT: movb $-46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_to_8xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,14,15,2,3,12,13,2,3,8,9,6,7,4,5] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x 
i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_to_8xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3] +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2,3,14,15,2,3,12,13,2,3,8,9,6,7,4,5] +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_16xi16_to_8xi16_perm_mem_mask3(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_to_8xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,14,15,2,3,12,13,2,3,8,9,6,7,4,5] +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <16 x i16> @test_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_to_16xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,12,13,10,12,13,1,28,6,24,9,11,12,2,14,2] +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1 +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 
x i16> @test_masked_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [8,12,13,10,12,13,1,28,6,24,9,11,12,2,14,2] +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movw $-25378, %ax # imm = 0x9CDE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,12,13,10,12,13,1,28,6,24,9,11,12,2,14,2] +; CHECK-NEXT: movw $-25378, %ax # imm = 0x9CDE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask1(<32 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [30,5,15,13,9,18,3,31,4,11,23,7,19,23,9,26] +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movw $-22502, %ax # imm = 0xA81A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> 
@test_masked_z_32xi16_to_16xi16_perm_mask1(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [30,5,15,13,9,18,3,31,4,11,23,7,19,23,9,26] +; CHECK-NEXT: movw $-22502, %ax # imm = 0xA81A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask2(<32 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [10,19,20,6,17,2,13,1,5,16,4,3,2,28,27,15] +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movw $31229, %ax # imm = 0x79FD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask2(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [10,19,20,6,17,2,13,1,5,16,4,3,2,28,27,15] +; CHECK-NEXT: movw $31229, %ax # imm = 0x79FD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec) { +; 
CHECK-LABEL: test_32xi16_to_16xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,30,5,3,6,25,29,0,13,3,8,7,20,11,5] +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1 +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [1,0,30,5,3,6,25,29,0,13,3,8,7,20,11,5] +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movw $5887, %ax # imm = 0x16FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,0,30,5,3,6,25,29,0,13,3,8,7,20,11,5] +; CHECK-NEXT: movw $5887, %ax # imm = 0x16FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <8 x i16> @test_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_to_8xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <22,27,7,10,13,21,5,14,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm1 +; CHECK-NEXT: vmovdqa 
%xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <22,27,7,10,13,21,5,14,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movb $-128, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <22,27,7,10,13,21,5,14,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm0, %ymm1, %ymm2 +; CHECK-NEXT: movb $-128, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask1(<32 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <1,21,27,10,8,19,14,5,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 
x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask1(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <1,21,27,10,8,19,14,5,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask2(<32 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <15,13,18,16,9,11,26,8,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask2(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <15,13,18,16,9,11,26,8,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer 
+ ret <8 x i16> %res +} +define <8 x i16> @test_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_to_8xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <17,0,23,10,1,8,7,30,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm1 +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <17,0,23,10,1,8,7,30,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $-4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmw %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <17,0,23,10,1,8,7,30,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $-4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_to_16xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: 
vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12] +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12] +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movw $23083, %ax # imm = 0x5A2B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12] +; CHECK-NEXT: movw $23083, %ax # imm = 0x5A2B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask1(<32 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: 
vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25] +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movw $18866, %ax # imm = 0x49B2 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask1(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [22,13,21,1,14,8,5,16,15,17,24,28,15,9,14,25] +; CHECK-NEXT: movw $18866, %ax # imm = 0x49B2 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask2(<32 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [12,9,22,15,4,18,7,15,28,5,26,22,6,16,10,0] +; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm3 +; CHECK-NEXT: movw $23540, %ax # imm = 0x5BF4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> 
@test_masked_z_32xi16_to_16xi16_perm_mem_mask2(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [12,9,22,15,4,18,7,15,28,5,26,22,6,16,10,0] +; CHECK-NEXT: movw $23540, %ax # imm = 0x5BF4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_to_16xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16] +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_32xi16_to_16xi16_perm_mem_mask3(<32 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_16xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16] +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movw $-3481, %ax # imm = 0xF267 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_32xi16_to_16xi16_perm_mem_mask3(<32 
x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_16xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [3,3,20,27,8,31,3,27,12,2,8,14,25,27,4,16] +; CHECK-NEXT: movw $-3481, %ax # imm = 0xF267 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_to_8xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm0 +; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm3 +; CHECK-NEXT: movb $-90, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> 
@test_masked_z_32xi16_to_8xi16_perm_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <16,17,5,1,14,14,13,17,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm0, %ymm1, %ymm2 +; CHECK-NEXT: movb $-90, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <7,6,4,6,12,4,27,1,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm2, %ymm3 +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask1(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <7,6,4,6,12,4,27,1,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm0, %ymm1, %ymm2 +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x 
i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <6,18,0,4,10,25,22,10,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $-34, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask2(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <6,18,0,4,10,25,22,10,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $-34, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_to_8xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm0 +; 
CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_to_8xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $71, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_32xi16_to_8xi16_perm_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_to_8xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <19,1,5,31,9,12,17,9,u,u,u,u,u,u,u,u> +; CHECK-NEXT: vpermi2w %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $71, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <4 x i32> @test_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec) { +; CHECK-LABEL: test_8xi32_to_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,2] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; CHECK-NEXT: vzeroupper +; 
CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,2] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0],xmm0[1,2,3] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask0(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,2] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask1(<8 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,0,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm2[2],xmm0[3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x 
i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask1(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[3,0,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0,1],xmm1[2],xmm0[3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask2(<8 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm2[1],xmm0[1] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask2(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm0[1] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec) { +; CHECK-LABEL: 
test_8xi32_to_4xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,3,2,1] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mask3(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3,2,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm1[3,1],xmm0[0,0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + ret <4 x 
i32> %res +} +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm2[3,1],xmm1[0,0] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask0(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm1[3,1],xmm0[0,0] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask1(<8 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2,3] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = xmm1[1,0,0,3] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> 
%res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask1(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1],xmm0[2,3] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,0,3] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} = xmm1[0,3,3,0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask2(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[0,3,3,0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> 
zeroinitializer + ret <4 x i32> %res +} + +define <4 x i32> @test_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_to_4xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_to_4xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[1,1,2,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0],xmm1[1,2],xmm2[3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_8xi32_to_4xi32_perm_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_to_4xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,3] +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1,2],xmm1[3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = 
shufflevector <8 x i32> %vec, <8 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <8 x i32> @test_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_to_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,5,3,6,15,2,9,14] +; CHECK-NEXT: vpermi2d %ymm0, %ymm2, %ymm1 +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [9,5,3,6,15,2,9,14] +; CHECK-NEXT: vpermi2d %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movb $67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,5,3,6,15,2,9,14] +; CHECK-NEXT: movb $67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm0, %ymm2, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask1(<16 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; 
CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [3,0,15,3,2,3,6,8] +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $-58, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask1(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,0,15,3,2,3,6,8] +; CHECK-NEXT: movb $-58, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask2(<16 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [2,15,15,2,6,10,14,7] +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $110, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask2(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [2,15,15,2,6,10,14,7] +; CHECK-NEXT: movb $110, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 
+; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_to_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [14,5,7,7,10,3,9,3] +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1 +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [14,5,7,7,10,3,9,3] +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $92, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [14,5,7,7,10,3,9,3] +; CHECK-NEXT: movb $92, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <4 x i32> @test_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_to_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; 
CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4] +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6],ymm1[7] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,2,0,4,5,6,4] +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm2[3],ymm0[4,5,6],ymm2[7] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,2,0,4,5,6,4] +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,0,3,4,6,4,7] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2],ymm1[3],ymm0[4,5,6],ymm1[7] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> 
@test_masked_16xi32_to_4xi32_perm_mask1(<16 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <5,1,3,4,u,u,u,u> +; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm0 +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask1(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <5,1,3,4,u,u,u,u> +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask2(<16 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <1,1,13,0,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask2(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask2: +; CHECK: 
# BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <1,1,13,0,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_to_4xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = <3,0,0,13,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm1 +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <3,0,0,13,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <3,0,0,13,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; 
CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_to_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [7,0,6,0,1,2,4,4] +; CHECK-NEXT: vpermd 32(%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [7,0,6,0,1,2,4,4] +; CHECK-NEXT: movb $84, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd 32(%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [7,0,6,0,1,2,4,4] +; CHECK-NEXT: movb $84, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd 32(%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask1(<16 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} 
ymm3 = [7,3,6,11,0,1,5,15] +; CHECK-NEXT: vpermi2d %ymm1, %ymm2, %ymm3 +; CHECK-NEXT: movb $41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask1(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [7,3,6,11,0,1,5,15] +; CHECK-NEXT: movb $41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask2(<16 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [4,14,1,5,4,2,8,10] +; CHECK-NEXT: vpermi2d %ymm1, %ymm2, %ymm3 +; CHECK-NEXT: movb $38, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask2(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa 
{{.*#+}} ymm0 = [4,14,1,5,4,2,8,10] +; CHECK-NEXT: movb $38, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_to_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [8,4,1,13,15,4,6,12] +; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = [8,4,1,13,15,4,6,12] +; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_16xi32_to_8xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [8,4,1,13,15,4,6,12] +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + 
%vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = <13,0,0,6,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <13,0,0,6,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <13,0,0,6,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 
x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[3,1,2,3,7,5,6,7] +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,3,2,4,5,7,6] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3],ymm2[4],ymm1[5,6,7] +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3] +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask1(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm0[3,1,2,3,7,5,6,7] +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,1,3,2,4,5,7,6] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3],ymm1[4],ymm0[5,6,7] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp, <4 x i32> %vec2) { 
+; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm3 = <2,15,6,9,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask2(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = <2,15,6,9,u,u,u,u> +; CHECK-NEXT: vpermi2d %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <4 x i32> @test_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_to_4xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; CHECK-NEXT: vpextrd $3, %xmm1, %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm1 +; CHECK-NEXT: vpextrd $2, %xmm0, %eax +; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, 
<4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_to_4xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm1 +; CHECK-NEXT: vmovd %xmm1, %eax +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[2,3,0,1] +; CHECK-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3 +; CHECK-NEXT: vpextrd $3, %xmm2, %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm3, %xmm2 +; CHECK-NEXT: vpextrd $2, %xmm1, %eax +; CHECK-NEXT: vpinsrd $3, %eax, %xmm2, %xmm1 +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_16xi32_to_4xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_to_4xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 (%rdi), %zmm0 +; CHECK-NEXT: vmovd %xmm0, %eax +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[2,3,0,1] +; CHECK-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2 +; CHECK-NEXT: vpextrd $3, %xmm1, %eax +; CHECK-NEXT: vpinsrd $2, %eax, %xmm2, %xmm1 +; CHECK-NEXT: vpextrd $2, %xmm0, %eax +; CHECK-NEXT: vpinsrd $3, %eax, %xmm1, %xmm0 +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} + +define <2 x i64> @test_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec) { +; CHECK-LABEL: test_4xi64_to_2xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: 
vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + ret <2 x i64> %res +} +define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm0[0] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask0(<4 x i64> %vec) { +; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm2[0,1],xmm0[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mask1(<4 x i64> %vec) { +; CHECK-LABEL: 
test_masked_z_4xi64_to_2xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <2 x i64> @test_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp) { +; CHECK-LABEL: test_4xi64_to_2xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %res = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + ret <2 x i64> %res +} +define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm1[1],xmm2[1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mem_mask0(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + 
%shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_4xi64_to_2xi64_perm_mem_mask1(<4 x i64>* %vp, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_to_2xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_4xi64_to_2xi64_perm_mem_mask1(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_to_2xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} + +define <4 x i64> @test_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_to_4xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,1] +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: 
vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,3,2,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask0(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,2,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask1(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5,6,7] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,0,2,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask1(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5,6,7] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,2,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x 
i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask2(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask2(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_to_4xi64_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,0,0,3] +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3,4,5,6,7] +; CHECK-NEXT: movb $8, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,0,0,3] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask3(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,0,3] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask4(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm0[3,1,2,3] +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,3,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5,6,7] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask4(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm0[3,1,2,3] +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,3,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 
{%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask5(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3,4,5],ymm2[6,7] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask5(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,1,4,5,4,5] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_to_4xi64_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,2,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> 
@test_masked_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,2,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask6(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,2,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm0[2,0,3,3] +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mask7(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mask7: +; 
CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm0[2,0,3,3] +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpbroadcastq %xmm0, %ymm0 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3,4,5],ymm0[6,7] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <2 x i64> @test_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_to_2xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + ret <2 x i64> %res +} +define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm0[0] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask0(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x 
i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mask1(<8 x i64> %vec, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mask1(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,2,3] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} +define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = mem[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, 
<8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask0(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[0,2,0,2] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask1(<8 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5,6,7] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[0,3,2,0] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask1(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5,6,7] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,2,0] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask2(<8 x 
i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,1,2,1] +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7] +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask2(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,2,1] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5],ymm1[6,7] +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,0,0,2] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask3(<8 
x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[3,0,0,2] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask3(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,0,2] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask4(<8 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,1,0,1,4,5,4,5] +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,2,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2,3,4,5],ymm1[6,7] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x 
i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask4(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,1,0,1,4,5,4,5] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,2,1] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask5(<8 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = ymm1[0,2,3,1] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask5(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2,3,1] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + 
%res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_to_4xi64_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,2,3] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,3,2] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpermq {{.*#+}} ymm2 = ymm2[3,1,2,3] +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,3,2] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5,6,7] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask6(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,1,2,3] +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,3,2] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3,4,5,6,7] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector 
<8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_8xi64_to_4xi64_perm_mem_mask7(<8 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_4xi64_perm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[3,3,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm2[6,7] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_8xi64_to_4xi64_perm_mem_mask7(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_4xi64_perm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vinserti128 $1, %xmm0, %ymm0, %ymm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,3,1,3] +; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <2 x i64> @test_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_to_2xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; 
CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + ret <2 x i64> %res +} +define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti32x4 $2, %zmm1, %xmm2 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm1[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask0(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti32x4 $2, %zmm0, %xmm1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_8xi64_to_2xi64_perm_mem_mask1(<8 x i64>* %vp, <2 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_to_2xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: vextracti64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vextracti128 $1, %ymm2, %xmm2 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm1 = xmm2[0,1],xmm1[2,3] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %xmm1, %xmm0 {%k1} 
+; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> %vec2 + ret <2 x i64> %res +} + +define <2 x i64> @test_masked_z_8xi64_to_2xi64_perm_mem_mask1(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_to_2xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vextracti64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vextracti128 $1, %ymm1, %xmm1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; CHECK-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <2 x i32> + %res = select <2 x i1> , <2 x i64> %shuf, <2 x i64> zeroinitializer + ret <2 x i64> %res +} + +define <4 x float> @test_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,3],xmm1[0,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[0,3],xmm2[0,1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x 
float> @test_masked_z_8xfloat_to_4xfloat_perm_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[0,3],xmm1[0,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask1(<8 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm0[0,0] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[1,3],xmm2[0,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask1(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3],xmm1[0,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask2(<8 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask2: +; CHECK: # BB#0: +; 
CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,0],xmm0[0,0] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[3,2],xmm2[0,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask2(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[0,0] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,2],xmm1[0,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,3,1,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm2[0],xmm0[1] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 {%k1} = xmm0[3,3,1,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; 
CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3,1,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,0] +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,0] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[2,0],xmm2[0,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> 
%vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,0] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,0],xmm1[0,1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask1(<8 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vblendps {{.*#+}} xmm1 = xmm2[0,1,2],xmm1[3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} = xmm1[2,3,3,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask1(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3,3,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x 
float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask2(<8 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[3,0],xmm1[3,0] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[3,1],xmm2[2,0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm0[3,0] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,1],xmm1[2,0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_to_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[3,0] +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[0,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + 
%res = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_to_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm1[3,0] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} = xmm1[1,3],xmm2[0,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_to_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[3,0] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3],xmm1[0,2] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <8 x float> @test_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,4,12,10,8,2,11,7] +; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm1 +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> 
@test_masked_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [0,4,12,10,8,2,11,7] +; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $52, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,4,12,10,8,2,11,7] +; CHECK-NEXT: movb $52, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask1(<16 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [2,4,11,4,12,7,9,6] +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask1(<16 x float> %vec) { +; CHECK-LABEL: 
test_masked_z_16xfloat_to_8xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [2,4,11,4,12,7,9,6] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask2(<16 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = <0,4,u,u,6,1,4,4> +; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm2 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3] +; CHECK-NEXT: movb $-78, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = <0,4,u,u,6,1,4,4> +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0] +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1],ymm1[2,3] +; CHECK-NEXT: movb $-78, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x 
float> @test_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,6,1,8,4,12,13,0] +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm1 +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [4,6,1,8,4,12,13,0] +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,6,1,8,4,12,13,0] +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <4 x float> @test_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = <12,0,1,2,u,u,u,u> +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm1 +; CHECK-NEXT: 
vmovaps %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = <12,0,1,2,u,u,u,u> +; CHECK-NEXT: vpermi2ps %ymm0, %ymm2, %ymm3 +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %xmm3, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = <12,0,1,2,u,u,u,u> +; CHECK-NEXT: vpermi2ps %ymm0, %ymm1, %ymm2 +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,2] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; 
CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask1(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm0 +; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,2,2] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask2(<16 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm2[0,0],ymm0[0,1],ymm2[4,4],ymm0[4,5] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm1 {%k1} +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,0],ymm0[0,1],ymm1[4,4],ymm0[4,5] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vextractf32x4 $1, %ymm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; 
CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,1,3,3] +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[2,1,3,3] +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm2[0],xmm0[1],xmm2[2],xmm0[3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,1,3,3] +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = 
xmm1[0],xmm0[1],xmm1[2],xmm0[3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,6,7,11,5,10,0,4] +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [7,6,7,11,5,10,0,4] +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $-105, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,6,7,11,5,10,0,4] +; CHECK-NEXT: movb $-105, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0 
{%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask1(<16 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [11,0,9,0,7,14,0,8] +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $36, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [11,0,9,0,7,14,0,8] +; CHECK-NEXT: movb $36, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask2(<16 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm1[1,0,0,3] +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm1 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 
= [8,5,2,3,2,9,10,1] +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask2(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm0 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm0[1,0,0,3] +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [8,5,2,3,2,9,10,1] +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_to_8xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,5,3,3,11,4,12,9] +; CHECK-NEXT: vpermi2ps %ymm1, %ymm2, %ymm0 +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_8xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [7,5,3,3,11,4,12,9] +; CHECK-NEXT: vpermi2ps %ymm1, 
%ymm2, %ymm3 +; CHECK-NEXT: movb $90, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_16xfloat_to_8xfloat_perm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_8xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [7,5,3,3,11,4,12,9] +; CHECK-NEXT: movb $90, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2ps %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,3,3] +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,1,2,3] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vpermilps {{.*#+}} xmm2 = xmm2[0,2,3,3] +; 
CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[3,1,2,3] +; CHECK-NEXT: vblendps {{.*#+}} xmm1 = xmm1[0],xmm2[1,2],xmm1[3] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,2,3,3] +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,1,2,3] +; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2],xmm0[3] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vshufps {{.*#+}} ymm1 = ymm2[0,2],ymm1[2,3],ymm2[4,6],ymm1[6,7] +; CHECK-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,2,1,3,4,6,5,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load 
<16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm1[0,2],ymm0[2,3],ymm1[4,6],ymm0[6,7] +; CHECK-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[0,2,1,3,4,6,5,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[0,0],ymm1[6,4],ymm2[4,4] +; CHECK-NEXT: vshufps {{.*#+}} ymm1 = ymm1[2,0],ymm2[2,3],ymm1[6,4],ymm2[6,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,2,2,3] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2(<16 x float>* %vp) { +; CHECK-LABEL: 
test_masked_z_16xfloat_to_4xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[0,0],ymm0[6,4],ymm1[4,4] +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,0],ymm1[2,3],ymm0[6,4],ymm1[6,7] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,2,2,3] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_to_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = <3,3,15,9,u,u,u,u> +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_masked_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp, <4 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_to_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = <3,3,15,9,u,u,u,u> +; CHECK-NEXT: vpermi2ps %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm3, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 
x float> %vec2 + ret <4 x float> %res +} + +define <4 x float> @test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_to_4xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = <3,3,15,9,u,u,u,u> +; CHECK-NEXT: vpermi2ps %ymm1, %ymm0, %ymm2 +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %xmm2, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <2 x double> @test_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_4xdouble_to_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm2[0],xmm0[0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; 
CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mask1(<4 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2 +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],xmm2[1] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mask1(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_to_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + ret <2 x double> %res +} 
+define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: vmovsd {{.*#+}} xmm1 = xmm2[0],xmm1[1] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: vmovsd {{.*#+}} xmm0 = xmm1[0],xmm0[1] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <2 x double> @test_masked_4xdouble_to_2xdouble_perm_mem_mask1(<4 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_to_2xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %ymm1 +; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2 +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} = xmm2[0],xmm1[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x 
double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_4xdouble_to_2xdouble_perm_mem_mask1(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_to_2xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %ymm0 +; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm1 +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm1[0],xmm0[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <4 x double> @test_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm2[1],ymm0[1],ymm2[3],ymm0[3] +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: 
vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm0[1],ymm1[3],ymm0[3] +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask1(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = [2,0,7,6] +; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask1(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [2,0,7,6] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask2(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,3,2,0] 
+; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask2(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,2,0] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,1,4] +; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1 +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = [0,2,1,4] +; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm3 +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm3, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask3(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 
+; CHECK-NEXT: vmovapd {{.*#+}} ymm1 = [0,2,1,4] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2pd %ymm2, %ymm0, %ymm1 {%k1} {z} +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask4(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm2[1] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,0,1,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask4(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,0,1,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask5(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: 
vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,3,2,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask5(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,2,2] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[0],ymm1[3],ymm0[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm2[1],ymm0[0],ymm2[3],ymm0[2] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> 
@test_masked_z_8xdouble_to_4xdouble_perm_mask6(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm1[1],ymm0[0],ymm1[3],ymm0[2] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mask7(<8 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2],ymm0[3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,1,0,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mask7(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2],ymm0[3] +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,1,0,2] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <2 x double> @test_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_to_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: 
vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm2[0],ymm0[2],ymm2[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mask1(<8 x double> %vec, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm2 +; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm2[1],ymm0[3],ymm2[3] 
+; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm1 {%k1} +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mask1(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vextractf64x2 $1, %ymm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [1,6,7,2] +; CHECK-NEXT: vpermi2pd %ymm2, %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = [1,6,7,2] +; CHECK-NEXT: vpermi2pd %ymm2, %ymm1, %ymm3 +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x 
double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [1,6,7,2] +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2pd %ymm2, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask1(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[3,0,2,0] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,2,0] +; 
CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask2(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[1,2,3,0] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,3,0] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,0] +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3] +; CHECK-NEXT: retq + %vec 
= load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,1,0] +; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0],ymm1[1,2,3] +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,0] +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0],ymm0[1,2,3] +; CHECK-NEXT: movb $9, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask4(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = [2,4,1,5] +; CHECK-NEXT: vpermi2pd %ymm1, %ymm2, %ymm3 +; CHECK-NEXT: movb $11, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm3, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask4(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vmovapd {{.*#+}} ymm0 = [2,4,1,5] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermi2pd %ymm1, %ymm2, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask5(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[2,1,1,1] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask5(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3] +; 
CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,1,1] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_to_4xdouble_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1] +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm2 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,1] +; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1 +; 
CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm0 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,1] +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_8xdouble_to_4xdouble_perm_mem_mask7(<8 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_4xdouble_perm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2,3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = ymm1[0,1,2,1] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_8xdouble_to_4xdouble_perm_mem_mask7(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_4xdouble_perm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2,3] +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,1] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <2 x double> @test_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: 
test_8xdouble_to_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: # kill: %XMM0 %XMM0 %YMM0 +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf64x4 $1, %zmm1, %ymm2 +; CHECK-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm2[0],ymm1[3],ymm2[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,3,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %xmm1, %xmm0 {%k1} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf64x4 $1, %zmm0, %ymm1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm1[0],ymm0[3],ymm1[2] +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,3,2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z} +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret 
<2 x double> %res +} + +define <2 x double> @test_masked_8xdouble_to_2xdouble_perm_mem_mask1(<8 x double>* %vp, <2 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_to_2xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm1 +; CHECK-NEXT: vextractf32x4 $2, %zmm1, %xmm2 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} = xmm1[1],xmm2[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec2 + ret <2 x double> %res +} + +define <2 x double> @test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_to_2xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd (%rdi), %zmm0 +; CHECK-NEXT: vextractf32x4 $2, %zmm0, %xmm1 +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0] +; CHECK-NEXT: vzeroupper +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/permute.ll b/llvm/test/CodeGen/X86/avx512-shuffles/permute.ll new file mode 100644 index 000000000000..47725ca1630c --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/permute.ll @@ -0,0 +1,2937 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <16 x i16> @test_16xi16_perm_mask0(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] +; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = 
shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_mask0(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] +; CHECK-NEXT: movw $-10197, %ax # imm = 0xD82B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mask0(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] +; CHECK-NEXT: movw $-10197, %ax # imm = 0xD82B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_mask1(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] +; CHECK-NEXT: movw $-15864, %ax # imm = 0xC208 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mask1(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] +; 
CHECK-NEXT: movw $-15864, %ax # imm = 0xC208 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_mask2(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] +; CHECK-NEXT: movw $27562, %ax # imm = 0x6BAA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mask2(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] +; CHECK-NEXT: movw $27562, %ax # imm = 0x6BAA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_16xi16_perm_mask3(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] +; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_mask3(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = 
[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] +; CHECK-NEXT: movw $16968, %ax # imm = 0x4248 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mask3(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] +; CHECK-NEXT: movw $16968, %ax # imm = 0x4248 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_16xi16_perm_mem_mask0(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] +; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_mem_mask0(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] +; CHECK-NEXT: movw $-27811, %ax # imm = 0x935D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mem_mask0(<16 x i16>* %vp) { +; 
CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] +; CHECK-NEXT: movw $-27811, %ax # imm = 0x935D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_mem_mask1(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] +; CHECK-NEXT: movw $19027, %ax # imm = 0x4A53 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mem_mask1(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] +; CHECK-NEXT: movw $19027, %ax # imm = 0x4A53 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_mem_mask2(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] +; CHECK-NEXT: movw $12412, %ax # imm = 0x307C +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mem_mask2(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] +; CHECK-NEXT: movw $12412, %ax # imm = 0x307C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_16xi16_perm_mem_mask3(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] +; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_mem_mask3(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] +; CHECK-NEXT: movw $12238, %ax # imm = 0x2FCE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_mem_mask3(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_mem_mask3: +; 
CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] +; CHECK-NEXT: movw $12238, %ax # imm = 0x2FCE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <32 x i16> @test_32xi16_perm_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] +; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_mask0(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] +; CHECK-NEXT: movl $948454498, %eax # imm = 0x38884462 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [16,1,3,31,6,11,23,26,29,5,21,30,1,21,27,10,8,19,14,5,15,13,18,16,9,11,26,8,17,0,23,10] +; CHECK-NEXT: movl $948454498, %eax # imm = 0x38884462 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = 
select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_mask1(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16] +; CHECK-NEXT: movl $-1516442487, %eax # imm = 0xA59CEC89 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mask1(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [1,8,7,30,11,9,11,30,20,19,22,12,13,20,0,6,10,7,20,12,28,18,13,12,22,13,21,1,14,8,5,16] +; CHECK-NEXT: movl $-1516442487, %eax # imm = 0xA59CEC89 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_mask2(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27] +; CHECK-NEXT: movl $1504501134, %eax # imm = 0x59ACDD8E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> 
@test_masked_z_32xi16_perm_mask2(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [15,17,24,28,15,9,14,25,28,25,6,31,20,2,23,31,12,21,10,6,22,0,26,16,3,3,20,27,8,31,3,27] +; CHECK-NEXT: movl $1504501134, %eax # imm = 0x59ACDD8E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_32xi16_perm_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] +; CHECK-NEXT: vpermw %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_mask3(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] +; CHECK-NEXT: movl $774459490, %eax # imm = 0x2E295062 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [12,2,8,14,25,27,4,16,20,11,27,8,0,1,21,17,30,30,29,1,23,22,20,22,28,20,11,17,6,18,0,4] +; CHECK-NEXT: movl $774459490, %eax # imm = 0x2E295062 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw %zmm0, 
%zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_32xi16_perm_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] +; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_mem_mask0(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] +; CHECK-NEXT: movl $1431978123, %eax # imm = 0x555A408B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [19,1,5,31,9,12,17,9,15,7,1,5,16,2,12,10,13,3,29,15,26,31,10,15,22,13,9,23,28,29,20,12] +; CHECK-NEXT: movl $1431978123, %eax # imm = 0x555A408B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> 
@test_masked_32xi16_perm_mem_mask1(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6] +; CHECK-NEXT: movl $-903561653, %eax # imm = 0xCA24BE4B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mem_mask1(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [31,20,2,2,23,1,0,12,16,14,15,18,21,13,11,31,8,24,13,11,2,27,22,28,14,21,3,12,6,1,30,6] +; CHECK-NEXT: movl $-903561653, %eax # imm = 0xCA24BE4B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_32xi16_perm_mem_mask2(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25] +; CHECK-NEXT: movl $-1209035774, %eax # imm = 0xB7EF9402 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mem_mask2(<32 x i16>* %vp) 
{ +; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [4,6,12,17,4,31,31,4,12,21,28,15,29,10,15,15,21,6,19,7,10,30,28,26,1,4,8,25,26,18,22,25] +; CHECK-NEXT: movl $-1209035774, %eax # imm = 0xB7EF9402 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_32xi16_perm_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] +; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_mem_mask3(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] +; CHECK-NEXT: movl $1452798329, %eax # imm = 0x5697F179 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [2,2,27,1,7,1,0,27,10,5,4,20,30,16,28,16,18,21,25,24,31,23,28,6,17,19,26,15,25,12,18,27] +; CHECK-NEXT: movl $1452798329, %eax # 
imm = 0x5697F179 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermw (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) { +; CHECK-LABEL: test_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [4,2,0,6,7,2,3,6] +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [4,2,0,6,7,2,3,6] +; CHECK-NEXT: movb $-53, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [4,2,0,6,7,2,3,6] +; CHECK-NEXT: movb $-53, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [0,5,1,2,6,0,0,3] +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: 
vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [0,5,1,2,6,0,0,3] +; CHECK-NEXT: movb $-89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [3,6,5,5,1,7,3,4] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,6,5,5,1,7,3,4] +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) { +; CHECK-LABEL: test_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,0,3,1,0,4,5,0] +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> 
%res +} +define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm2 = [3,0,3,1,0,4,5,0] +; CHECK-NEXT: movb $47, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,0,3,1,0,4,5,0] +; CHECK-NEXT: movb $47, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [3,7,4,3,5,2,0,5] +; CHECK-NEXT: vpermd (%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [3,7,4,3,5,2,0,5] +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp) { +; 
CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [3,7,4,3,5,2,0,5] +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [4,6,1,7,6,7,6,5] +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [4,6,1,7,6,7,6,5] +; CHECK-NEXT: movb $89, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [6,4,6,1,6,3,6,3] +; CHECK-NEXT: movb $98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x 
i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [6,4,6,1,6,3,6,3] +; CHECK-NEXT: movb $98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [6,0,0,7,3,7,7,5] +; CHECK-NEXT: vpermd (%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm1 = [6,0,0,7,3,7,7,5] +; CHECK-NEXT: movb $-58, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa {{.*#+}} ymm0 = [6,0,0,7,3,7,7,5] +; CHECK-NEXT: movb $-58, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x 
i32> zeroinitializer + ret <8 x i32> %res +} + +define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] +; CHECK-NEXT: movw $-28063, %ax # imm = 0x9261 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [14,12,11,6,4,1,6,9,14,14,6,1,12,11,0,7] +; CHECK-NEXT: movw $-28063, %ax # imm = 0x9261 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3] +; CHECK-NEXT: movw $14154, %ax # imm = 0x374A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, 
<16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [10,0,14,15,11,1,1,5,0,5,0,15,13,1,14,3] +; CHECK-NEXT: movw $14154, %ax # imm = 0x374A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5] +; CHECK-NEXT: movw $6126, %ax # imm = 0x17EE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [3,10,15,1,0,5,0,9,13,2,1,5,15,2,15,5] +; CHECK-NEXT: movw $6126, %ax # imm = 0x17EE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 +; 
CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm2 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] +; CHECK-NEXT: movw $-11837, %ax # imm = 0xD1C3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [7,4,14,15,10,2,15,1,9,2,14,15,12,5,3,12] +; CHECK-NEXT: movw $-11837, %ax # imm = 0xD1C3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm0 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] +; CHECK-NEXT: vpermd (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] +; CHECK-NEXT: movw $19075, %ax # imm = 0x4A83 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 
{%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm0 = [0,1,1,6,8,11,2,6,10,1,7,5,15,0,6,6] +; CHECK-NEXT: movw $19075, %ax # imm = 0x4A83 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3] +; CHECK-NEXT: movw $27511, %ax # imm = 0x6B77 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm0 = [11,5,3,4,7,15,12,4,8,11,12,7,6,12,6,3] +; CHECK-NEXT: movw $27511, %ax # imm = 0x6B77 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + 
+define <16 x i32> @test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2] +; CHECK-NEXT: movw $3032, %ax # imm = 0xBD8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm0 = [7,14,2,7,10,7,3,0,11,9,0,4,12,10,8,2] +; CHECK-NEXT: movw $3032, %ax # imm = 0xBD8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm0 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] +; CHECK-NEXT: vpermd (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm1 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] +; CHECK-NEXT: movw $8666, %ax # imm = 0x21DA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 
x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa32 {{.*#+}} zmm0 = [11,7,10,12,3,12,4,15,1,14,0,4,8,9,6,1] +; CHECK-NEXT: movw $8666, %ax # imm = 0x21DA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <4 x i64> @test_4xi64_perm_mask0(<4 x i64> %vec) { +; CHECK-LABEL: test_4xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,0,3,1] +; CHECK-NEXT: retq + %res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_4xi64_perm_mask0(<4 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,0,3,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mask0(<4 x i64> %vec) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,0,3,1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_4xi64_perm_mask1(<4 
x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mask1(<4 x i64> %vec) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_masked_4xi64_perm_mask2(<4 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,2,2,1] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mask2(<4 x i64> %vec) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2,2,1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_perm_mask3(<4 x i64> %vec) { +; CHECK-LABEL: test_4xi64_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,3,3] +; CHECK-NEXT: retq + %res = shufflevector <4 x i64> %vec, 
<4 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_4xi64_perm_mask3(<4 x i64> %vec, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,3] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mask3(<4 x i64> %vec) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_perm_mem_mask0(<4 x i64>* %vp) { +; CHECK-LABEL: test_4xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = mem[2,1,2,0] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_4xi64_perm_mem_mask0(<4 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,2,0] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mem_mask0(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq 
{{.*#+}} ymm0 {%k1} {z} = mem[2,1,2,0] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_4xi64_perm_mem_mask1(<4 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,1,1,1] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mem_mask1(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,1,1,1] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_4xi64_perm_mem_mask2(<4 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[0,1,2,0] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mem_mask2(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,0] +; CHECK-NEXT: retq 
+ %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_perm_mem_mask3(<4 x i64>* %vp) { +; CHECK-LABEL: test_4xi64_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} ymm0 = mem[2,0,1,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %res = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_masked_4xi64_perm_mem_mask3(<4 x i64>* %vp, <4 x i64> %vec2) { +; CHECK-LABEL: test_masked_4xi64_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} = mem[2,0,1,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec2 + ret <4 x i64> %res +} + +define <4 x i64> @test_masked_z_4xi64_perm_mem_mask3(<4 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_4xi64_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} ymm0 {%k1} {z} = mem[2,0,1,3] +; CHECK-NEXT: retq + %vec = load <4 x i64>, <4 x i64>* %vp + %shuf = shufflevector <4 x i64> %vec, <4 x i64> undef, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <8 x i64> @test_8xi64_perm_mask0(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,4,7,6,5,5,1,6] +; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_mask0(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 
{{.*#+}} zmm2 = [0,4,7,6,5,5,1,6] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mask0(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,4,7,6,5,5,1,6] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_imm_mask1(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-122, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[1,0,1,1,5,4,5,5] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mask1(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-122, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[1,0,1,1,5,4,5,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_mask2(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [1,3,7,3,3,5,4,1] +; 
CHECK-NEXT: movb $17, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mask2(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [1,3,7,3,3,5,4,1] +; CHECK-NEXT: movb $17, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_perm_imm_mask3(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} zmm0 = zmm0[3,1,3,1,7,5,7,5] +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_imm_mask3(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,1,7,5,7,5] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mask3(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,1,7,5,7,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x 
i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_mask4(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [6,3,1,1,7,4,0,3] +; CHECK-NEXT: movb $-81, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mask4(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [6,3,1,1,7,4,0,3] +; CHECK-NEXT: movb $-81, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_imm_mask5(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[0,0,0,0,4,4,4,4] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mask5(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-67, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,0,0,4,4,4,4] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> 
%res +} +define <8 x i64> @test_8xi64_perm_mask6(<8 x i64> %vec) { +; CHECK-LABEL: test_8xi64_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,1,4,4,5,4,2,7] +; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_mask6(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm2 = [5,1,4,4,5,4,2,7] +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mask6(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,1,4,4,5,4,2,7] +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_imm_mask7(<8 x i64> %vec, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm1 {%k1} = zmm0[3,3,3,3,7,7,7,7] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mask7(<8 x i64> %vec) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: 
movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,3,3,7,7,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_perm_mem_mask0(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [5,1,6,5,7,3,7,3] +; CHECK-NEXT: vpermq (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_mem_mask0(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,1,6,5,7,3,7,3] +; CHECK-NEXT: movb $-108, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mem_mask0(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [5,1,6,5,7,3,7,3] +; CHECK-NEXT: movb $-108, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $125, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq 
{{.*#+}} zmm0 {%k1} = mem[1,1,1,0,5,5,5,4] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask1(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $125, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,1,1,0,5,5,5,4] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_8xi64_perm_mem_mask2(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,2,1,4,1,1,5,5] +; CHECK-NEXT: movb $-77, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mem_mask2(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,2,1,4,1,1,5,5] +; CHECK-NEXT: movb $-77, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermq {{.*#+}} 
zmm0 = mem[1,3,1,1,5,7,5,5] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $55, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[1,3,1,1,5,7,5,5] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask3(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $55, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[1,3,1,1,5,7,5,5] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_8xi64_perm_mem_mask4(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [5,0,7,0,3,5,0,6] +; CHECK-NEXT: movb $68, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mem_mask4(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [5,0,7,0,3,5,0,6] +; CHECK-NEXT: movb $68, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq 
(%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,1,0,0,7,5,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask5(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,1,0,0,7,5,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_perm_mem_mask6(<8 x i64>* %vp) { +; CHECK-LABEL: test_8xi64_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,6,3,7,3,0,3,6] +; CHECK-NEXT: vpermq (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %res = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_masked_8xi64_perm_mem_mask6(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,6,3,7,3,0,3,6] +; CHECK-NEXT: movb $42, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x 
i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_mem_mask6(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,6,3,7,3,0,3,6] +; CHECK-NEXT: movb $42, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp, <8 x i64> %vec2) { +; CHECK-LABEL: test_masked_8xi64_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} = mem[3,0,0,1,7,4,4,5] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec2 + ret <8 x i64> %res +} + +define <8 x i64> @test_masked_z_8xi64_perm_imm_mem_mask7(<8 x i64>* %vp) { +; CHECK-LABEL: test_masked_z_8xi64_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermq {{.*#+}} zmm0 {%k1} {z} = mem[3,0,0,1,7,4,4,5] +; CHECK-NEXT: retq + %vec = load <8 x i64>, <8 x i64>* %vp + %shuf = shufflevector <8 x i64> %vec, <8 x i64> undef, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x float> @test_8xfloat_perm_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,4,2,4,1,2,3,4] +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 
x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask0(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [3,4,2,4,1,2,3,4] +; CHECK-NEXT: movb $33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask0(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [3,4,2,4,1,2,3,4] +; CHECK-NEXT: movb $33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask1(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [4,2,1,0,6,0,5,1] +; CHECK-NEXT: movb $-34, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask1(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,2,1,0,6,0,5,1] +; CHECK-NEXT: movb $-34, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 
x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask2(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [2,5,5,5,4,6,0,5] +; CHECK-NEXT: movb $-18, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask2(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [2,5,5,5,4,6,0,5] +; CHECK-NEXT: movb $-18, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_perm_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_8xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,5,2,5,5,5,1,6] +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mask3(<8 x float> %vec, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm2 = [0,5,2,5,5,5,1,6] +; CHECK-NEXT: movb $82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm2, %ymm1 {%k1} +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 
x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mask3(<8 x float> %vec) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [0,5,2,5,5,5,1,6] +; CHECK-NEXT: movb $82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %ymm0, %ymm1, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_perm_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [5,2,1,6,4,2,4,0] +; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mem_mask0(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [5,2,1,6,4,2,4,0] +; CHECK-NEXT: movb $61, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask0(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [5,2,1,6,4,2,4,0] +; CHECK-NEXT: movb $61, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select 
<8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_8xfloat_perm_mem_mask1(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [1,3,7,4,0,6,6,6] +; CHECK-NEXT: movb $-124, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask1(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [1,3,7,4,0,6,6,6] +; CHECK-NEXT: movb $-124, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_masked_8xfloat_perm_mem_mask2(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [4,5,1,5,6,6,2,4] +; CHECK-NEXT: movb $-84, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask2(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [4,5,1,5,6,6,2,4] +; CHECK-NEXT: movb $-84, 
%al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_perm_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_8xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,0,6,4,2,3,0] +; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0 +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %res = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_masked_8xfloat_perm_mem_mask3(<8 x float>* %vp, <8 x float> %vec2) { +; CHECK-LABEL: test_masked_8xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm1 = [5,7,0,6,4,2,3,0] +; CHECK-NEXT: movb $60, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec2 + ret <8 x float> %res +} + +define <8 x float> @test_masked_z_8xfloat_perm_mem_mask3(<8 x float>* %vp) { +; CHECK-LABEL: test_masked_z_8xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} ymm0 = [5,7,0,6,4,2,3,0] +; CHECK-NEXT: movb $60, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x float>, <8 x float>* %vp + %shuf = shufflevector <8 x float> %vec, <8 x float> undef, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <16 x float> @test_16xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} 
zmm1 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask0(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] +; CHECK-NEXT: movw $14423, %ax # imm = 0x3857 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mask0(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [15,7,5,13,4,9,11,13,12,6,0,0,11,15,5,7] +; CHECK-NEXT: movw $14423, %ax # imm = 0x3857 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask1(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1] +; CHECK-NEXT: movw $-22757, %ax # imm = 0xA71B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> 
@test_masked_z_16xfloat_perm_mask1(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [11,10,4,10,4,5,8,11,2,0,10,0,0,3,10,1] +; CHECK-NEXT: movw $-22757, %ax # imm = 0xA71B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask2(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11] +; CHECK-NEXT: movw $-22227, %ax # imm = 0xA92D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mask2(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [0,15,6,14,3,6,5,2,5,15,11,6,6,4,8,11] +; CHECK-NEXT: movw $-22227, %ax # imm = 0xA92D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_perm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_16xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec, <16 x 
float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mask3(<16 x float> %vec, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm2 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] +; CHECK-NEXT: movw $32420, %ax # imm = 0x7EA4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mask3(<16 x float> %vec) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,0,14,6,6,0,2,13,8,11,2,5,13,13,3] +; CHECK-NEXT: movw $32420, %ax # imm = 0x7EA4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mem_mask0(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] +; CHECK-NEXT: movw $1441, %ax # imm = 0x5A1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} 
+; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask0(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [10,2,1,14,9,9,7,2,9,4,12,11,0,14,0,1] +; CHECK-NEXT: movw $1441, %ax # imm = 0x5A1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_mem_mask1(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4] +; CHECK-NEXT: movw $-12684, %ax # imm = 0xCE74 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask1(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [4,2,3,5,11,6,4,7,6,4,14,8,15,12,9,4] +; CHECK-NEXT: movw $-12684, %ax # imm = 0xCE74 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x 
float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_masked_16xfloat_perm_mem_mask2(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5] +; CHECK-NEXT: movw $11066, %ax # imm = 0x2B3A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask2(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [10,7,11,6,7,0,11,0,10,9,12,4,10,3,8,5] +; CHECK-NEXT: movw $11066, %ax # imm = 0x2B3A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_perm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_16xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %res = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_masked_16xfloat_perm_mem_mask3(<16 x float>* %vp, <16 x float> %vec2) { +; CHECK-LABEL: test_masked_16xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm1 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] +; CHECK-NEXT: movw 
$-13916, %ax # imm = 0xC9A4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec2 + ret <16 x float> %res +} + +define <16 x float> @test_masked_z_16xfloat_perm_mem_mask3(<16 x float>* %vp) { +; CHECK-LABEL: test_masked_z_16xfloat_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovaps {{.*#+}} zmm0 = [15,15,3,9,5,15,14,9,11,10,5,14,14,5,11,0] +; CHECK-NEXT: movw $-13916, %ax # imm = 0xC9A4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermps (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <16 x float>, <16 x float>* %vp + %shuf = shufflevector <16 x float> %vec, <16 x float> undef, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <4 x double> @test_4xdouble_perm_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,2] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask0(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[2,1,3,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask0(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd 
{{.*#+}} ymm0 {%k1} {z} = ymm0[2,1,3,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask1(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,0,0,0] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask1(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,0,0,0] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask2(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,1] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask2(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,1] +; CHECK-NEXT: retq + 
%shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_perm_mask3(<4 x double> %vec) { +; CHECK-LABEL: test_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,3,3,2] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mask3(<4 x double> %vec, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mask3(<4 x double> %vec) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_perm_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[0,0,2,0] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mem_mask0(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,0,2,0] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask0(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,0,2,0] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_4xdouble_perm_mem_mask1(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[0,2,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask1(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[0,2,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_masked_4xdouble_perm_mem_mask2(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: 
test_masked_4xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,1,1,1] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask2(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,1,1,1] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_perm_mem_mask3(<4 x double>* %vp) { +; CHECK-LABEL: test_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 = mem[3,2,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %res = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_masked_4xdouble_perm_mem_mask3(<4 x double>* %vp, <4 x double> %vec2) { +; CHECK-LABEL: test_masked_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} = mem[3,2,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec2 + ret <4 x double> %res +} + +define <4 x double> @test_masked_z_4xdouble_perm_mem_mask3(<4 x double>* %vp) { +; CHECK-LABEL: test_masked_z_4xdouble_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,2] +; CHECK-NEXT: retq + %vec = load <4 x double>, <4 x double>* %vp + %shuf = shufflevector <4 x double> %vec, <4 x double> undef, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <8 x double> @test_8xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [5,7,4,2,7,4,3,4] +; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask0(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [5,7,4,2,7,4,3,4] +; CHECK-NEXT: movb $-115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask0(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [5,7,4,2,7,4,3,4] +; CHECK-NEXT: movb $-115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_imm_mask1(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} 
zmm1 {%k1} = zmm0[3,0,0,2,7,4,4,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mask1(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,0,2,7,4,4,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask2(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [7,5,5,5,3,5,1,7] +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask2(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [7,5,5,5,3,5,1,7] +; CHECK-NEXT: movb $49, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_perm_imm_mask3(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 = 
zmm0[1,3,3,0,5,7,7,4] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_imm_mask3(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-57, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mask3(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-57, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask4(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [3,5,3,4,6,5,7,1] +; CHECK-NEXT: movb $-54, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask4(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [3,5,3,4,6,5,7,1] +; CHECK-NEXT: movb $-54, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm1, 
%zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_imm_mask5(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,3,2,3,7,7,6,7] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mask5(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,2,3,7,7,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_perm_mask6(<8 x double> %vec) { +; CHECK-LABEL: test_8xdouble_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [2,7,6,4,0,0,0,2] +; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0 +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mask6(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm2 = [2,7,6,4,0,0,0,2] +; CHECK-NEXT: movb $-65, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm2, %zmm1 {%k1} +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x 
double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mask6(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [2,7,6,4,0,0,0,2] +; CHECK-NEXT: movb $-65, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd %zmm0, %zmm1, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_imm_mask7(<8 x double> %vec, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $40, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,2,7,5,7,6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mask7(<8 x double> %vec) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $40, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,2,7,5,7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm0 = [0,3,4,0,4,2,0,1] +; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, 
<8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mem_mask0(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [0,3,4,0,4,2,0,1] +; CHECK-NEXT: movb $99, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask0(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm0 = [0,3,4,0,4,2,0,1] +; CHECK-NEXT: movb $99, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-32, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,2,0,3,4,6,4,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask1(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-32, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = 
mem[0,2,0,3,4,6,4,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_masked_8xdouble_perm_mem_mask2(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [6,7,2,7,7,6,2,5] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask2(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm0 = [6,7,2,7,7,6,2,5] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 = mem[2,1,1,0,6,5,5,4] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $119, %al +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,1,1,0,6,5,5,4] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask3(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $119, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,1,1,0,6,5,5,4] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_masked_8xdouble_perm_mem_mask4(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [1,1,3,5,6,0,6,0] +; CHECK-NEXT: movb $-45, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask4(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm0 = [1,1,3,5,6,0,6,0] +; CHECK-NEXT: movb $-45, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + 
+define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[2,2,2,3,6,6,6,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask5(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $33, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[2,2,2,3,6,6,6,7] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_perm_mem_mask6(<8 x double>* %vp) { +; CHECK-LABEL: test_8xdouble_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm0 = [2,4,0,4,6,1,2,5] +; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0 +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %res = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_masked_8xdouble_perm_mem_mask6(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm1 = [2,4,0,4,6,1,2,5] +; CHECK-NEXT: movb $-75, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, 
<8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_mem_mask6(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vmovapd {{.*#+}} zmm0 = [2,4,0,4,6,1,2,5] +; CHECK-NEXT: movb $-75, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd (%rdi), %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_masked_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp, <8 x double> %vec2) { +; CHECK-LABEL: test_masked_8xdouble_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $84, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} = mem[0,3,2,0,4,7,6,4] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec2 + ret <8 x double> %res +} + +define <8 x double> @test_masked_z_8xdouble_perm_imm_mem_mask7(<8 x double>* %vp) { +; CHECK-LABEL: test_masked_z_8xdouble_perm_imm_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $84, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpermpd {{.*#+}} zmm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4] +; CHECK-NEXT: retq + %vec = load <8 x double>, <8 x double>* %vp + %shuf = shufflevector <8 x double> %vec, <8 x double> undef, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll b/llvm/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll new file mode 100644 index 000000000000..a1afe31b93b7 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/shuffle-interleave.ll @@ -0,0 +1,1312 @@ +; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <4 x float> @test_4xfloat_shuff_mask0(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,1],xmm1[3,1] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_shuff_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[2,1],xmm1[3,1] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mask0(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1],xmm1[3,1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_shuff_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[1,2],xmm1[3,2] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> 
@test_4xfloat_zero_masked_shuff_mask1(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,2],xmm1[3,2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_shuff_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[1,3],xmm1[2,1] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mask2(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3],xmm1[2,1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_shuff_mask3(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_shuff_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: 
movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm2 {%k1} = xmm0[3,3],xmm1[3,3] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mask3(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3],xmm1[3,3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_shuff_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,0],mem[1,2] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_shuff_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[1,0],mem[1,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0],mem[1,2] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_masked_shuff_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[3,3],mem[1,3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3],mem[1,3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_masked_shuff_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[1,3],mem[2,0] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> 
%shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3],mem[2,0] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_shuff_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,1],mem[3,2] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_shuff_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm1 {%k1} = xmm0[2,1],mem[3,2] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_shuff_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1],mem[3,2] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, 
<4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <8 x float> @test_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm1[0,2],ymm0[5,7],ymm1[4,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[1,3],ymm1[0,2],ymm0[5,7],ymm1[4,6] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3],ymm1[0,2],ymm0[5,7],ymm1[4,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[0,3],ymm1[3,1],ymm0[4,7],ymm1[7,5] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> 
%shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3],ymm1[3,1],ymm0[4,7],ymm1[7,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[0,2],ymm1[2,2],ymm0[4,6],ymm1[6,6] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2],ymm1[2,2],ymm0[4,6],ymm1[6,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,2],ymm1[3,2],ymm0[7,6],ymm1[7,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> 
@test_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm2 {%k1} = ymm0[3,2],ymm1[3,2],ymm0[7,6],ymm1[7,6] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2],ymm1[3,2],ymm0[7,6],ymm1[7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[2,1],mem[0,0],ymm0[6,5],mem[4,4] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-106, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[2,1],mem[0,0],ymm0[6,5],mem[4,4] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x 
float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-106, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,1],mem[0,0],ymm0[6,5],mem[4,4] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[2,2],mem[1,0],ymm0[6,6],mem[5,4] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $114, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[2,2],mem[1,0],ymm0[6,6],mem[5,4] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, 
%al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3],mem[3,3],ymm0[7,7],mem[7,7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[3,3],mem[2,1],ymm0[7,7],mem[6,5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm1 {%k1} = ymm0[3,3],mem[2,1],ymm0[7,7],mem[6,5] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> 
@test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3],mem[2,1],ymm0[7,7],mem[6,5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[3,2],zmm1[3,2],zmm0[7,6],zmm1[7,6],zmm0[11,10],zmm1[11,10],zmm0[15,14],zmm1[15,14] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-19315, %ax # imm = 0xB48D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[3,2],zmm1[3,2],zmm0[7,6],zmm1[7,6],zmm0[11,10],zmm1[11,10],zmm0[15,14],zmm1[15,14] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-19315, %ax # imm = 0xB48D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2],zmm1[3,2],zmm0[7,6],zmm1[7,6],zmm0[11,10],zmm1[11,10],zmm0[15,14],zmm1[15,14] +; CHECK-NEXT: retq + %shuf = 
shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18064, %ax # imm = 0x4690 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[1,2],zmm1[3,3],zmm0[5,6],zmm1[7,7],zmm0[9,10],zmm1[11,11],zmm0[13,14],zmm1[15,15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18064, %ax # imm = 0x4690 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[1,2],zmm1[3,3],zmm0[5,6],zmm1[7,7],zmm0[9,10],zmm1[11,11],zmm0[13,14],zmm1[15,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12346, %ax # imm = 0xCFC6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[3,0],zmm1[2,1],zmm0[7,4],zmm1[6,5],zmm0[11,8],zmm1[10,9],zmm0[15,12],zmm1[14,13] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} 
+ +define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12346, %ax # imm = 0xCFC6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0],zmm1[2,1],zmm0[7,4],zmm1[6,5],zmm0[11,8],zmm1[10,9],zmm0[15,12],zmm1[14,13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[2,3],zmm1[0,2],zmm0[6,7],zmm1[4,6],zmm0[10,11],zmm1[8,10],zmm0[14,15],zmm1[12,14] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-9865, %ax # imm = 0xD977 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm2 {%k1} = zmm0[2,3],zmm1[0,2],zmm0[6,7],zmm1[4,6],zmm0[10,11],zmm1[8,10],zmm0[14,15],zmm1[12,14] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-9865, %ax # imm = 0xD977 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3],zmm1[0,2],zmm0[6,7],zmm1[4,6],zmm0[10,11],zmm1[8,10],zmm0[14,15],zmm1[12,14] 
+; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[3,0],mem[0,2],zmm0[7,4],mem[4,6],zmm0[11,8],mem[8,10],zmm0[15,12],mem[12,14] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $7677, %ax # imm = 0x1DFD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[3,0],mem[0,2],zmm0[7,4],mem[4,6],zmm0[11,8],mem[8,10],zmm0[15,12],mem[12,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $7677, %ax # imm = 0x1DFD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0],mem[0,2],zmm0[7,4],mem[4,6],zmm0[11,8],mem[8,10],zmm0[15,12],mem[12,14] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> 
@test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $14448, %ax # imm = 0x3870 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[0,2],mem[3,2],zmm0[4,6],mem[7,6],zmm0[8,10],mem[11,10],zmm0[12,14],mem[15,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $14448, %ax # imm = 0x3870 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[0,2],mem[3,2],zmm0[4,6],mem[7,6],zmm0[8,10],mem[11,10],zmm0[12,14],mem[15,14] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-13463, %ax # imm = 0xCB69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[2,0],mem[2,2],zmm0[6,4],mem[6,6],zmm0[10,8],mem[10,10],zmm0[14,12],mem[14,14] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x 
float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-13463, %ax # imm = 0xCB69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0],mem[2,2],zmm0[6,4],mem[6,6],zmm0[10,8],mem[10,10],zmm0[14,12],mem[14,14] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufps {{.*#+}} zmm0 = zmm0[2,1],mem[1,3],zmm0[6,5],mem[5,7],zmm0[10,9],mem[9,11],zmm0[14,13],mem[13,15] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $21793, %ax # imm = 0x5521 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm1 {%k1} = zmm0[2,1],mem[1,3],zmm0[6,5],mem[5,7],zmm0[10,9],mem[9,11],zmm0[14,13],mem[13,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $21793, %ax # imm = 0x5521 +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufps {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1],mem[1,3],zmm0[6,5],mem[5,7],zmm0[10,9],mem[9,11],zmm0[14,13],mem[13,15] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <2 x double> @test_2xdouble_shuff_mask0(<2 x double> %vec1, <2 x double> %vec2) { +; CHECK-LABEL: test_2xdouble_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],xmm1[0] +; CHECK-NEXT: retq + %res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_masked_shuff_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0] +; CHECK-NEXT: vmovapd %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_shuff_mask0(<2 x double> %vec1, <2 x double> %vec2) { +; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_masked_shuff_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[0] +; CHECK-NEXT: vmovapd %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_shuff_mask1(<2 x double> %vec1, <2 x double> %vec2) { +; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_shuff_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) { +; CHECK-LABEL: test_2xdouble_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 = xmm0[1],mem[0] +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_masked_shuff_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_shuff_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) { +; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[0] +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_masked_shuff_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_shuff_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p) { +; CHECK-LABEL: test_2xdouble_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[0] +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[3],ymm1[3] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask0: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[3],ymm1[3] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[3],ymm1[3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[3],ymm1[2] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[3],ymm1[2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> 
%vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[0],ymm0[3],ymm1[2] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[0],ymm0[3],ymm1[2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[3] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: 
test_4xdouble_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> 
@test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[1],ymm0[2],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[1],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[3],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[3],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[2],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double> 
%vec2) { +; CHECK-LABEL: test_8xdouble_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[7],zmm1[7] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-77, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[7],zmm1[7] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-77, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[4],zmm1[5],zmm0[7],zmm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[5],zmm1[5],zmm0[6],zmm1[7] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + 
+define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[3],zmm0[5],zmm1[5],zmm0[6],zmm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[6] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[5],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[7],zmm1[7] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec1, <8 x 
double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[7],zmm1[7] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[0],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[7],zmm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[5],mem[5],zmm0[6],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $72, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[5],mem[5],zmm0[6],mem[7] +; CHECK-NEXT: vmovapd %zmm1, 
%zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $72, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[5],mem[5],zmm0[6],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[0],zmm0[3],mem[2],zmm0[4],mem[4],zmm0[7],mem[7] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[0],zmm0[3],mem[2],zmm0[4],mem[4],zmm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 
x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[2],zmm0[5],mem[5],zmm0[7],mem[7] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[2],zmm0[5],mem[5],zmm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[2],mem[3],zmm0[4],mem[5],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-39, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[2],mem[3],zmm0[4],mem[5],zmm0[6],mem[6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-39, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[2],mem[3],zmm0[4],mem[5],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll b/llvm/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll new file mode 100644 index 000000000000..b4e762e43d49 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/shuffle-vec.ll @@ -0,0 +1,1941 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +; FIXME: 128-bit shuffles of 256-bit vectors cases should be fixed by PR34359 + +define <8 x float> @test_8xfloat_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 
{{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $-41, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $-63, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask1(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $-63, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask2(<8 x float> 
%vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask2(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $66, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: 
test_8xfloat_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $66, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $-24, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $-24, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x 
float> @test_8xfloat_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $-50, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = 
ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $-50, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $-26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_shuff_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $-26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <16 x float> @test_16xfloat_shuff_mask0(<16 x float> 
%vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],zmm1[2,3,6,7] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-11480, %ax # imm = 0xD328 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-11480, %ax # imm = 0xD328 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,0,1,2,3],zmm1[4,5,6,7,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-21749, %ax # imm = 0xAB0B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 
x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask1(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-21749, %ax # imm = 0xAB0B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,8,9,10,11],zmm1[0,1,2,3,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $75, %ax +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask2(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $75, %ax +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,4,5,6,7],zmm1[0,1,2,3,4,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,6,7],zmm1[0,1,4,5] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret 
<16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $32347, %ax # imm = 0x7E5B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $32347, %ax # imm = 0x7E5B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,12,13,14,15],zmm1[0,1,2,3,8,9,10,11] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,4,5],mem[4,5,2,3] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-19232, %ax # imm = 0xB4E0 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + 
%vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-19232, %ax # imm = 0xB4E0 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[12,13,14,15,8,9,10,11],mem[8,9,10,11,4,5,6,7] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-29660, %ax # imm = 0x8C24 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-29660, %ax # imm = 0x8C24 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,4,5,6,7] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = 
select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12160, %ax # imm = 0xD080 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12160, %ax # imm = 0xD080 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,0,1,2,3],mem[8,9,10,11,8,9,10,11] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,6,7] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30129, %ax # imm = 
0x8A4F +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_shuff_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30129, %ax # imm = 0x8A4F +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],mem[12,13,14,15,12,13,14,15] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <4 x double> @test_4xdouble_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask0(<4 x double> %vec1, <4 x 
double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask1(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + 
%res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask2(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mask3(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 
x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, 
%ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> 
%shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_shuff_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <8 x double> @test_8xdouble_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,2,3],zmm1[6,7,0,1] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> 
%res +} +define <8 x double> @test_8xdouble_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,2,3],zmm1[6,7,0,1] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask0(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,2,3],zmm1[6,7,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-70, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,4,5] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask1(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-70, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,4,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x 
double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $30, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[4,5,0,1] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask2(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $30, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[4,5,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,2,3] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $56, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,2,3] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x 
i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mask3(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $56, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,2,3] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[6,7,0,1],mem[0,1,0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,0,1],mem[0,1,0,1] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $95, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,0,1],mem[0,1,0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = 
shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[6,7,6,7],mem[0,1,2,3] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,6,7],mem[0,1,2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3],mem[0,1,4,5] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> 
@test_8xdouble_zero_masked_shuff_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3],mem[0,1,4,5] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[4,5,0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[4,5,0,1] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_shuff_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshuff64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[4,5,0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x 
double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x i32> @test_8xi32_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2) { +; CHECK-LABEL: test_8xi32_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mask0(<8 x i32> %vec1, <8 x i32> %vec2) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $26, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $-4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> 
@test_8xi32_zero_masked_shuff_mask1(<8 x i32> %vec1, <8 x i32> %vec2) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $-4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $51, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mask2(<8 x i32> %vec1, <8 x i32> %vec2) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $51, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2) { +; CHECK-LABEL: test_8xi32_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mask3: +; CHECK: # BB#0: +; 
CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $92, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mask3(<8 x i32> %vec1, <8 x i32> %vec2) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $92, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p) { +; CHECK-LABEL: test_8xi32_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask0(<8 x i32> %vec1, <8 x i32>* %vec2p) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = 
ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask1(<8 x i32> %vec1, <8 x i32>* %vec2p) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $113, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> 
, <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask2(<8 x i32> %vec1, <8 x i32>* %vec2p) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $113, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p) { +; CHECK-LABEL: test_8xi32_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %res = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p, <8 x i32> %vec3) { +; CHECK-LABEL: test_8xi32_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $45, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> %vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec3 + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_zero_masked_shuff_mem_mask3(<8 x i32> %vec1, <8 x i32>* %vec2p) { +; CHECK-LABEL: test_8xi32_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $45, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <8 x i32>, <8 x i32>* %vec2p + %shuf = shufflevector <8 x i32> 
%vec1, <8 x i32> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <16 x i32> @test_16xi32_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2) { +; CHECK-LABEL: test_16xi32_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],zmm1[2,3,6,7] +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $2995, %ax # imm = 0xBB3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mask0(<16 x i32> %vec1, <16 x i32> %vec2) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $2995, %ax # imm = 0xBB3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],zmm1[4,5,6,7,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18408, %ax # imm = 0x47E8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, 
<16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mask1(<16 x i32> %vec1, <16 x i32> %vec2) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18408, %ax # imm = 0x47E8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,8,9,10,11],zmm1[8,9,10,11,4,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $15737, %ax # imm = 0x3D79 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mask2(<16 x i32> %vec1, <16 x i32> %vec2) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $15737, %ax # imm = 0x3D79 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],zmm1[0,1,2,3,0,1,2,3] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2) { +; CHECK-LABEL: test_16xi32_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],zmm1[4,5,2,3] +; CHECK-NEXT: retq + %res = 
shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3073, %ax # imm = 0xF3FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm2 {%k1} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mask3(<16 x i32> %vec1, <16 x i32> %vec2) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3073, %ax # imm = 0xF3FF +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,0,1,2,3],zmm1[8,9,10,11,4,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p) { +; CHECK-LABEL: test_16xi32_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,2,3],mem[4,5,0,1] +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8166, %ax # imm = 0xE01A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x 
i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask0(<16 x i32> %vec1, <16 x i32>* %vec2p) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-8166, %ax # imm = 0xE01A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[8,9,10,11,4,5,6,7],mem[8,9,10,11,0,1,2,3] +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28302, %ax # imm = 0x9172 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask1(<16 x i32> %vec1, <16 x i32>* %vec2p) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-28302, %ax # imm = 0x9172 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[0,1,2,3,8,9,10,11] +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x 
i32> @test_16xi32_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27158, %ax # imm = 0x6A16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask2(<16 x i32> %vec1, <16 x i32>* %vec2p) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27158, %ax # imm = 0x6A16 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,8,9,10,11],mem[12,13,14,15,12,13,14,15] +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p) { +; CHECK-LABEL: test_16xi32_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[2,3,6,7] +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %res = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p, <16 x i32> %vec3) { +; CHECK-LABEL: test_16xi32_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26363, %ax # imm = 0x66FB +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm1 {%k1} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: 
retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec3 + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_zero_masked_shuff_mem_mask3(<16 x i32> %vec1, <16 x i32>* %vec2p) { +; CHECK-LABEL: test_16xi32_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $26363, %ax # imm = 0x66FB +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi32x4 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,6,7,4,5,6,7],mem[4,5,6,7,12,13,14,15] +; CHECK-NEXT: retq + %vec2 = load <16 x i32>, <16 x i32>* %vec2p + %shuf = shufflevector <16 x i32> %vec1, <16 x i32> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <4 x i64> @test_4xi64_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2) { +; CHECK-LABEL: test_4xi64_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: retq + %res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mask0(<4 x i64> %vec1, <4 x i64> %vec2) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector 
<4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mask1(<4 x i64> %vec1, <4 x i64> %vec2) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mask2(<4 x i64> %vec1, <4 x i64> %vec2) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[0,1] +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 
{%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2) { +; CHECK-LABEL: test_4xi64_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: retq + %res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm2, %ymm0 {%k1} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mask3(<4 x i64> %vec1, <4 x i64> %vec2) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p) { +; CHECK-LABEL: test_4xi64_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3) { +; 
CHECK-LABEL: test_4xi64_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask0(<4 x i64> %vec1, <4 x i64>* %vec2p) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask1(<4 x i64> %vec1, <4 x i64>* %vec2p) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf 
= shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask2(<4 x i64> %vec1, <4 x i64>* %vec2p) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[0,1] +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p) { +; CHECK-LABEL: test_4xi64_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %res = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + ret <4 x i64> %res +} +define <4 x i64> @test_4xi64_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p, <4 x i64> %vec3) { +; CHECK-LABEL: test_4xi64_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1} 
+; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> %vec3 + ret <4 x i64> %res +} + +define <4 x i64> @test_4xi64_zero_masked_shuff_mem_mask3(<4 x i64> %vec1, <4 x i64>* %vec2p) { +; CHECK-LABEL: test_4xi64_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],mem[2,3] +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z} +; CHECK-NEXT: retq + %vec2 = load <4 x i64>, <4 x i64>* %vec2p + %shuf = shufflevector <4 x i64> %vec1, <4 x i64> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x i64> %shuf, <4 x i64> zeroinitializer + ret <4 x i64> %res +} + +define <8 x i64> @test_8xi64_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2) { +; CHECK-LABEL: test_8xi64_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[4,5,4,5],zmm1[4,5,4,5] +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-15, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[4,5,4,5],zmm1[4,5,4,5] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mask0(<8 x i64> %vec1, <8 x i64> %vec2) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-15, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,4,5],zmm1[4,5,4,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 
x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-17, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[6,7,4,5],zmm1[2,3,4,5] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mask1(<8 x i64> %vec1, <8 x i64> %vec2) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-17, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[6,7,4,5],zmm1[2,3,4,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-24, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[0,1,4,5],zmm1[0,1,0,1] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mask2(<8 x i64> %vec1, <8 x i64> %vec2) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-24, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,4,5],zmm1[0,1,0,1] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x 
i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2) { +; CHECK-LABEL: test_8xi64_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,6,7],zmm1[4,5,2,3] +; CHECK-NEXT: retq + %res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm2 {%k1} = zmm0[2,3,6,7],zmm1[4,5,2,3] +; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mask3(<8 x i64> %vec1, <8 x i64> %vec2) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,6,7],zmm1[4,5,2,3] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p) { +; CHECK-LABEL: test_8xi64_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,2,3],mem[4,5,2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,2,3],mem[4,5,2,3] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask0(<8 x i64> %vec1, <8 x i64>* %vec2p) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,2,3],mem[4,5,2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[0,1,0,1] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask1(<8 x i64> %vec1, <8 x i64>* %vec2p) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[0,1,0,1] +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} 
+ +define <8 x i64> @test_8xi64_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $42, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[4,5,0,1],mem[2,3,2,3] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask2(<8 x i64> %vec1, <8 x i64>* %vec2p) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $42, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[4,5,0,1],mem[2,3,2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p) { +; CHECK-LABEL: test_8xi64_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 = zmm0[2,3,0,1],mem[6,7,2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %res = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + ret <8 x i64> %res +} +define <8 x i64> @test_8xi64_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p, <8 x i64> %vec3) { +; CHECK-LABEL: test_8xi64_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm1 {%k1} = zmm0[2,3,0,1],mem[6,7,2,3] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> 
%shuf, <8 x i64> %vec3 + ret <8 x i64> %res +} + +define <8 x i64> @test_8xi64_zero_masked_shuff_mem_mask3(<8 x i64> %vec1, <8 x i64>* %vec2p) { +; CHECK-LABEL: test_8xi64_zero_masked_shuff_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vshufi64x2 {{.*#+}} zmm0 {%k1} {z} = zmm0[2,3,0,1],mem[6,7,2,3] +; CHECK-NEXT: retq + %vec2 = load <8 x i64>, <8 x i64>* %vec2p + %shuf = shufflevector <8 x i64> %vec1, <8 x i64> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x i64> %shuf, <8 x i64> zeroinitializer + ret <8 x i64> %res +} + diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/shuffle.ll b/llvm/test/CodeGen/X86/avx512-shuffles/shuffle.ll new file mode 100644 index 000000000000..1668d3c6d665 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/shuffle.ll @@ -0,0 +1,2792 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <16 x i8> @test_16xi8_perm_mask0(<16 x i8> %vec) { +; CHECK-LABEL: test_16xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] +; CHECK-NEXT: retq + %res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + ret <16 x i8> %res +} +define <16 x i8> @test_masked_16xi8_perm_mask0(<16 x i8> %vec, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-10197, %ax # imm = 0xD82B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mask0(<16 x i8> %vec) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-10197, %ax # imm = 0xD82B +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[8,6,12,4,7,9,14,8,4,12,9,4,14,15,12,14] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_16xi8_perm_mask1(<16 x i8> %vec, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-15864, %ax # imm = 0xC208 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mask1(<16 x i8> %vec) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-15864, %ax # imm = 0xC208 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[4,11,14,10,7,1,6,9,14,15,7,13,4,12,8,0] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_masked_16xi8_perm_mask2(<16 x i8> %vec, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27562, %ax # imm = 0x6BAA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mask2(<16 x i8> %vec) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: 
movw $27562, %ax # imm = 0x6BAA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[11,6,13,10,0,7,13,3,5,13,3,9,3,15,12,7] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_16xi8_perm_mask3(<16 x i8> %vec) { +; CHECK-LABEL: test_16xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] +; CHECK-NEXT: retq + %res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + ret <16 x i8> %res +} +define <16 x i8> @test_masked_16xi8_perm_mask3(<16 x i8> %vec, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $16968, %ax # imm = 0x4248 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm1 {%k1} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mask3(<16 x i8> %vec) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $16968, %ax # imm = 0x4248 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[1,5,8,14,1,8,11,8,13,8,15,9,9,7,9,6] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} +define <16 x i8> @test_16xi8_perm_mem_mask0(<16 x i8>* %vp) { +; CHECK-LABEL: test_16xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %res = shufflevector <16 x i8> 
%vec, <16 x i8> undef, <16 x i32> + ret <16 x i8> %res +} +define <16 x i8> @test_masked_16xi8_perm_mem_mask0(<16 x i8>* %vp, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm1 +; CHECK-NEXT: movw $-27811, %ax # imm = 0x935D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} = xmm1[9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mem_mask0(<16 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-NEXT: movw $-27811, %ax # imm = 0x935D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[9,10,7,1,12,14,14,13,14,14,8,6,11,4,12,13] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_16xi8_perm_mem_mask1(<16 x i8>* %vp, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm1 +; CHECK-NEXT: movw $19027, %ax # imm = 0x4A53 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} = xmm1[14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mem_mask1(<16 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-NEXT: movw 
$19027, %ax # imm = 0x4A53 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[14,9,15,9,7,10,15,14,12,1,9,7,10,13,3,11] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_16xi8_perm_mem_mask2(<16 x i8>* %vp, <16 x i8> %vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm1 +; CHECK-NEXT: movw $12412, %ax # imm = 0x307C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} = xmm1[1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mem_mask2(<16 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-NEXT: movw $12412, %ax # imm = 0x307C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[1,3,12,5,13,1,2,11,0,9,14,8,10,0,10,9] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} + +define <16 x i8> @test_16xi8_perm_mem_mask3(<16 x i8>* %vp) { +; CHECK-LABEL: test_16xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %res = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + ret <16 x i8> %res +} +define <16 x i8> @test_masked_16xi8_perm_mem_mask3(<16 x i8>* %vp, <16 x i8> 
%vec2) { +; CHECK-LABEL: test_masked_16xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm1 +; CHECK-NEXT: movw $12238, %ax # imm = 0x2FCE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} = xmm1[9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> %vec2 + ret <16 x i8> %res +} + +define <16 x i8> @test_masked_z_16xi8_perm_mem_mask3(<16 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_16xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %xmm0 +; CHECK-NEXT: movw $12238, %ax # imm = 0x2FCE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} xmm0 {%k1} {z} = xmm0[9,6,5,15,0,0,15,2,1,3,12,14,0,6,1,4] +; CHECK-NEXT: retq + %vec = load <16 x i8>, <16 x i8>* %vp + %shuf = shufflevector <16 x i8> %vec, <16 x i8> undef, <16 x i32> + %res = select <16 x i1> , <16 x i8> %shuf, <16 x i8> zeroinitializer + ret <16 x i8> %res +} + +define <32 x i8> @test_32xi8_perm_mask0(<32 x i8> %vec) { +; CHECK-LABEL: test_32xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] +; CHECK-NEXT: retq + %res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + ret <32 x i8> %res +} +define <32 x i8> @test_masked_32xi8_perm_mask0(<32 x i8> %vec, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $948454498, %eax # imm = 0x38884462 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x 
i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mask0(<32 x i8> %vec) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $948454498, %eax # imm = 0x38884462 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[8,0,1,15,3,5,11,13,14,2,10,15,0,10,13,5,20,25,23,18,23,22,25,24,20,21,29,20,24,16,27,21] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_32xi8_perm_mask1(<32 x i8> %vec, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1516442487, %eax # imm = 0xA59CEC89 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mask1(<32 x i8> %vec) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1516442487, %eax # imm = 0xA59CEC89 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[0,4,3,15,5,4,5,15,10,9,11,6,6,10,0,3,21,19,26,22,30,25,22,22,27,22,26,16,23,20,18,24] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_masked_32xi8_perm_mask2(<32 x i8> %vec, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1504501134, %eax # imm = 0x59ACDD8E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = 
ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mask2(<32 x i8> %vec) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1504501134, %eax # imm = 0x59ACDD8E +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[7,8,12,14,7,4,7,12,14,12,3,15,10,1,11,15,22,26,21,19,27,16,29,24,17,17,26,29,20,31,17,29] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_32xi8_perm_mask3(<32 x i8> %vec) { +; CHECK-LABEL: test_32xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] +; CHECK-NEXT: retq + %res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + ret <32 x i8> %res +} +define <32 x i8> @test_masked_32xi8_perm_mask3(<32 x i8> %vec, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $774459490, %eax # imm = 0x2E295062 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm1 {%k1} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mask3(<32 x i8> %vec) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $774459490, %eax # imm = 0x2E295062 +; CHECK-NEXT: 
kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[6,1,4,7,12,13,2,8,10,5,13,4,0,0,10,8,31,31,30,16,27,27,26,27,30,26,21,24,19,25,16,18] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} +define <32 x i8> @test_32xi8_perm_mem_mask0(<32 x i8>* %vp) { +; CHECK-LABEL: test_32xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[9,0,2,15,4,6,8,4,7,3,0,2,8,1,6,5,22,17,30,23,29,31,21,23,27,22,20,27,30,30,26,22] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + ret <32 x i8> %res +} +define <32 x i8> @test_masked_32xi8_perm_mem_mask0(<32 x i8>* %vp, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: movl $1431978123, %eax # imm = 0x555A408B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm1[9,0,2,15,4,6,8,4,7,3,0,2,8,1,6,5,22,17,30,23,29,31,21,23,27,22,20,27,30,30,26,22] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mem_mask0(<32 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: movl $1431978123, %eax # imm = 0x555A408B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[9,0,2,15,4,6,8,4,7,3,0,2,8,1,6,5,22,17,30,23,29,31,21,23,27,22,20,27,30,30,26,22] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> 
zeroinitializer + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_32xi8_perm_mem_mask1(<32 x i8>* %vp, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: movl $-903561653, %eax # imm = 0xCA24BE4B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm1[15,10,1,1,11,0,0,6,8,7,7,9,10,6,5,15,20,28,22,21,17,29,27,30,23,26,17,22,19,16,31,19] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mem_mask1(<32 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: movl $-903561653, %eax # imm = 0xCA24BE4B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[15,10,1,1,11,0,0,6,8,7,7,9,10,6,5,15,20,28,22,21,17,29,27,30,23,26,17,22,19,16,31,19] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_32xi8_perm_mem_mask2(<32 x i8>* %vp, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: movl $-1209035774, %eax # imm = 0xB7EF9402 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm1[2,3,6,8,2,15,15,2,6,10,14,7,14,5,7,7,26,19,25,19,21,31,30,29,16,18,20,28,29,25,27,28] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mem_mask2(<32 x 
i8>* %vp) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: movl $-1209035774, %eax # imm = 0xB7EF9402 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,6,8,2,15,15,2,6,10,14,7,14,5,7,7,26,19,25,19,21,31,30,29,16,18,20,28,29,25,27,28] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} + +define <32 x i8> @test_32xi8_perm_mem_mask3(<32 x i8>* %vp) { +; CHECK-LABEL: test_32xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[1,1,13,0,3,0,0,13,5,2,2,10,15,8,14,8,25,26,28,28,31,27,30,19,24,25,29,23,28,22,25,29] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %res = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + ret <32 x i8> %res +} +define <32 x i8> @test_masked_32xi8_perm_mem_mask3(<32 x i8>* %vp, <32 x i8> %vec2) { +; CHECK-LABEL: test_masked_32xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm1 +; CHECK-NEXT: movl $1452798329, %eax # imm = 0x5697F179 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} = ymm1[1,1,13,0,3,0,0,13,5,2,2,10,15,8,14,8,25,26,28,28,31,27,30,19,24,25,29,23,28,22,25,29] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> %vec2 + ret <32 x i8> %res +} + +define <32 x i8> @test_masked_z_32xi8_perm_mem_mask3(<32 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_32xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa (%rdi), %ymm0 +; CHECK-NEXT: movl $1452798329, %eax # imm = 0x5697F179 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} ymm0 {%k1} {z} = 
ymm0[1,1,13,0,3,0,0,13,5,2,2,10,15,8,14,8,25,26,28,28,31,27,30,19,24,25,29,23,28,22,25,29] +; CHECK-NEXT: retq + %vec = load <32 x i8>, <32 x i8>* %vp + %shuf = shufflevector <32 x i8> %vec, <32 x i8> undef, <32 x i32> + %res = select <32 x i1> , <32 x i8> %shuf, <32 x i8> zeroinitializer + ret <32 x i8> %res +} + +define <64 x i8> @test_64xi8_perm_mask0(<64 x i8> %vec) { +; CHECK-LABEL: test_64xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] +; CHECK-NEXT: retq + %res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + ret <64 x i8> %res +} +define <64 x i8> @test_masked_64xi8_perm_mask0(<64 x i8> %vec, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $3680399704764602881, %rax # imm = 0x3313680829F25A01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_64xi8_perm_mask0(<64 x i8> %vec) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $3680399704764602881, %rax # imm = 0x3313680829F25A01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[8,4,1,13,15,4,6,12,0,10,2,4,13,0,0,6,23,29,27,26,18,31,22,25,22,16,23,18,16,25,26,17,40,37,38,44,39,46,41,39,42,37,33,42,41,44,34,46,60,62,61,58,60,56,60,51,60,55,60,55,60,49,48,62] +; CHECK-NEXT: retq + %shuf = shufflevector <64 x 
i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_64xi8_perm_mask1(<64 x i8> %vec, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $3029806472256067585, %rax # imm = 0x2A0C08EF15009801 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_64xi8_perm_mask1(<64 x i8> %vec) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $3029806472256067585, %rax # imm = 0x2A0C08EF15009801 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[7,14,15,10,9,3,1,13,14,12,11,6,4,1,6,9,30,30,22,17,28,27,16,23,26,16,30,31,27,17,17,21,32,37,32,47,45,33,46,35,35,42,47,33,32,37,32,41,61,50,49,53,63,50,63,53,55,52,62,63,58,50,63,49] +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_masked_64xi8_perm_mask2(<64 x i8> %vec, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $1110016799796225, %rax # imm = 0x3F18DED0BEC01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60] +; CHECK-NEXT: 
vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_64xi8_perm_mask2(<64 x i8> %vec) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $1110016799796225, %rax # imm = 0x3F18DED0BEC01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[9,2,14,15,12,5,3,12,4,6,0,2,0,1,1,6,24,27,18,22,26,17,23,21,31,16,22,22,27,21,19,20,39,47,44,36,40,43,44,39,38,44,38,35,39,46,34,39,58,55,51,48,59,57,48,52,60,58,56,50,59,55,58,60] +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_64xi8_perm_mask3(<64 x i8> %vec) { +; CHECK-LABEL: test_64xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] +; CHECK-NEXT: retq + %res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + ret <64 x i8> %res +} +define <64 x i8> @test_masked_64xi8_perm_mask3(<64 x i8> %vec, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $839183534234450945, %rax # imm = 0xBA560FA6B66BC01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm1 {%k1} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res 
+} + +define <64 x i8> @test_masked_z_64xi8_perm_mask3(<64 x i8> %vec) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movabsq $839183534234450945, %rax # imm = 0xBA560FA6B66BC01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[3,12,4,15,1,14,0,4,8,9,6,1,4,4,12,14,25,16,28,20,21,24,19,30,18,22,20,24,25,26,24,22,42,38,44,44,36,37,42,34,43,38,41,34,42,37,39,38,55,59,53,58,48,52,59,48,57,48,55,62,48,56,49,61] +; CHECK-NEXT: retq + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} +define <64 x i8> @test_64xi8_perm_mem_mask0(<64 x i8>* %vp) { +; CHECK-LABEL: test_64xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[0,9,15,13,11,11,3,12,4,1,7,5,2,6,14,6,23,27,24,18,30,23,28,22,28,22,19,19,31,25,16,22,35,33,34,32,42,34,41,41,43,40,36,46,37,39,42,40,63,63,62,62,57,55,59,51,52,48,50,48,58,50,60,58] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + ret <64 x i8> %res +} +define <64 x i8> @test_masked_64xi8_perm_mem_mask0(<64 x i8>* %vp, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: movabsq $3164984076108002305, %rax # imm = 0x2BEC483F982F7401 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} = zmm1[0,9,15,13,11,11,3,12,4,1,7,5,2,6,14,6,23,27,24,18,30,23,28,22,28,22,19,19,31,25,16,22,35,33,34,32,42,34,41,41,43,40,36,46,37,39,42,40,63,63,62,62,57,55,59,51,52,48,50,48,58,50,60,58] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> 
@test_masked_z_64xi8_perm_mem_mask0(<64 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: movabsq $3164984076108002305, %rax # imm = 0x2BEC483F982F7401 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[0,9,15,13,11,11,3,12,4,1,7,5,2,6,14,6,23,27,24,18,30,23,28,22,28,22,19,19,31,25,16,22,35,33,34,32,42,34,41,41,43,40,36,46,37,39,42,40,63,63,62,62,57,55,59,51,52,48,50,48,58,50,60,58] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_64xi8_perm_mem_mask1(<64 x i8>* %vp, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: movabsq $3421658227176024577, %rax # imm = 0x2F7C2C07659EAA01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} = zmm1[15,6,14,7,5,1,14,12,5,7,5,0,0,5,3,8,19,19,26,27,20,29,20,21,27,16,30,17,23,27,16,28,47,39,33,33,33,44,38,46,39,33,38,44,45,32,34,39,50,61,62,53,54,56,52,56,51,52,55,57,56,52,51,49] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_64xi8_perm_mem_mask1(<64 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: movabsq $3421658227176024577, %rax # imm = 0x2F7C2C07659EAA01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = 
zmm0[15,6,14,7,5,1,14,12,5,7,5,0,0,5,3,8,19,19,26,27,20,29,20,21,27,16,30,17,23,27,16,28,47,39,33,33,33,44,38,46,39,33,38,44,45,32,34,39,50,61,62,53,54,56,52,56,51,52,55,57,56,52,51,49] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_64xi8_perm_mem_mask2(<64 x i8>* %vp, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: movabsq $3085252902658394625, %rax # imm = 0x2AD1052B29324A01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} = zmm1[12,1,11,3,4,11,10,11,8,13,1,10,1,11,5,10,27,26,19,29,19,24,26,19,26,20,18,28,24,21,25,16,34,38,47,40,33,44,44,44,41,43,35,43,45,44,37,41,58,62,49,61,56,53,55,48,51,58,58,55,63,55,53,61] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_64xi8_perm_mem_mask2(<64 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: movabsq $3085252902658394625, %rax # imm = 0x2AD1052B29324A01 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[12,1,11,3,4,11,10,11,8,13,1,10,1,11,5,10,27,26,19,29,19,24,26,19,26,20,18,28,24,21,25,16,34,38,47,40,33,44,44,44,41,43,35,43,45,44,37,41,58,62,49,61,56,53,55,48,51,58,58,55,63,55,53,61] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} + +define <64 x i8> @test_64xi8_perm_mem_mask3(<64 x i8>* %vp) { +; CHECK-LABEL: 
test_64xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 = zmm0[4,9,11,13,12,6,0,0,11,15,5,7,11,10,4,10,20,21,24,27,18,16,26,16,16,19,26,17,16,31,22,30,35,38,37,34,37,47,43,38,38,36,40,43,42,39,32,46,54,54,48,50,61,56,59,50,53,61,61,51,48,60,50,60] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %res = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + ret <64 x i8> %res +} +define <64 x i8> @test_masked_64xi8_perm_mem_mask3(<64 x i8>* %vp, <64 x i8> %vec2) { +; CHECK-LABEL: test_masked_64xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm1 +; CHECK-NEXT: movabsq $29622951609754113, %rax # imm = 0x693DEAE3E5E201 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} = zmm1[4,9,11,13,12,6,0,0,11,15,5,7,11,10,4,10,20,21,24,27,18,16,26,16,16,19,26,17,16,31,22,30,35,38,37,34,37,47,43,38,38,36,40,43,42,39,32,46,54,54,48,50,61,56,59,50,53,61,61,51,48,60,50,60] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> %vec2 + ret <64 x i8> %res +} + +define <64 x i8> @test_masked_z_64xi8_perm_mem_mask3(<64 x i8>* %vp) { +; CHECK-LABEL: test_masked_z_64xi8_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0 +; CHECK-NEXT: movabsq $29622951609754113, %rax # imm = 0x693DEAE3E5E201 +; CHECK-NEXT: kmovq %rax, %k1 +; CHECK-NEXT: vpshufb {{.*#+}} zmm0 {%k1} {z} = zmm0[4,9,11,13,12,6,0,0,11,15,5,7,11,10,4,10,20,21,24,27,18,16,26,16,16,19,26,17,16,31,22,30,35,38,37,34,37,47,43,38,38,36,40,43,42,39,32,46,54,54,48,50,61,56,59,50,53,61,61,51,48,60,50,60] +; CHECK-NEXT: retq + %vec = load <64 x i8>, <64 x i8>* %vp + %shuf = shufflevector <64 x i8> %vec, <64 x i8> undef, <64 x i32> + %res = select <64 x i1> , <64 x i8> %shuf, <64 x i8> zeroinitializer + ret <64 x i8> %res +} + +define <8 x i16> 
@test_8xi16_perm_high_mask0(<8 x i16> %vec) { +; CHECK-LABEL: test_8xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,7,6] +; CHECK-NEXT: retq + %res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_high_mask0(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,7,6] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mask0(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_low_mask1(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $43, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[0,3,0,0,4,5,6,7] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mask1(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $43, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,3,0,0,4,5,6,7] 
+; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_high_mask2(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $20, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,4,4,5] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mask2(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $20, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,4,4,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_8xi16_perm_low_mask3(<8 x i16> %vec) { +; CHECK-LABEL: test_8xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,1,1,1,4,5,6,7] +; CHECK-NEXT: retq + %res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_low_mask3(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-20, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[2,1,1,1,4,5,6,7] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mask3(<8 x i16> %vec) { +; 
CHECK-LABEL: test_masked_z_8xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-20, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[2,1,1,1,4,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_high_mask4(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,5,5,7,6] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mask4(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-104, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,5,5,7,6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_low_mask5(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[3,3,2,1,4,5,6,7] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mask5(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, 
%al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[3,3,2,1,4,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_8xi16_perm_high_mask6(<8 x i16> %vec) { +; CHECK-LABEL: test_8xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,5] +; CHECK-NEXT: retq + %res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_high_mask6(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movb $117, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm1 {%k1} = xmm0[0,1,2,3,6,5,6,5] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mask6(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movb $117, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = xmm0[0,1,2,3,6,5,6,5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_low_mask7(<8 x i16> %vec, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $39, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0,4,5,6,7] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> 
%vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mask7(<8 x i16> %vec) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $39, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0,4,5,6,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} +define <8 x i16> @test_8xi16_perm_high_mem_mask0(<8 x i16>* %vp) { +; CHECK-LABEL: test_8xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,7,4,6] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_high_mem_mask0(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-83, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,7,4,6] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask0(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-83, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,7,4,6] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_8xi16_perm_low_mem_mask1(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask1: +; CHECK: 
# BB#0: +; CHECK-NEXT: movb $-108, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[1,3,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask1(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-108, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_8xi16_perm_high_mem_mask2(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-58, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,6,6,5,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask2(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-58, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,6,6,5,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_8xi16_perm_low_mem_mask3(<8 x i16>* %vp) { +; CHECK-LABEL: test_8xi16_perm_low_mem_mask3: +; CHECK: # 
BB#0: +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 = mem[3,1,2,0,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_low_mem_mask3(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $74, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[3,1,2,0,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask3(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $74, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[3,1,2,0,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_8xi16_perm_high_mem_mask4(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-81, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,6,7,5] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask4(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-81, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = 
mem[0,1,2,3,7,6,7,5] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_8xi16_perm_low_mem_mask5(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $53, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[2,1,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask5(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movb $53, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[2,1,3,2,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_8xi16_perm_high_mem_mask6(<8 x i16>* %vp) { +; CHECK-LABEL: test_8xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 = mem[0,1,2,3,7,4,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %res = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + ret <8 x i16> %res +} +define <8 x i16> @test_masked_8xi16_perm_high_mem_mask6(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-121, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} = mem[0,1,2,3,7,4,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> 
undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_high_mem_mask6(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-121, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} xmm0 {%k1} {z} = mem[0,1,2,3,7,4,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_8xi16_perm_low_mem_mask7(<8 x i16>* %vp, <8 x i16> %vec2) { +; CHECK-LABEL: test_masked_8xi16_perm_low_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} = mem[0,3,3,1,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> %vec2 + ret <8 x i16> %res +} + +define <8 x i16> @test_masked_z_8xi16_perm_low_mem_mask7(<8 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_8xi16_perm_low_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movb $87, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} xmm0 {%k1} {z} = mem[0,3,3,1,4,5,6,7] +; CHECK-NEXT: retq + %vec = load <8 x i16>, <8 x i16>* %vp + %shuf = shufflevector <8 x i16> %vec, <8 x i16> undef, <8 x i32> + %res = select <8 x i1> , <8 x i16> %shuf, <8 x i16> zeroinitializer + ret <8 x i16> %res +} + +define <16 x i16> @test_16xi16_perm_high_mask0(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] +; CHECK-NEXT: retq + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> 
@test_masked_16xi16_perm_high_mask0(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3495, %ax # imm = 0xF259 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mask0(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3495, %ax # imm = 0xF259 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,4,4,6,4,8,9,10,11,12,12,14,12] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_low_mask1(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-11903, %ax # imm = 0xD181 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mask1(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-11903, %ax # imm = 0xD181 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,2,3,2,4,5,6,7,8,10,11,10,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = 
select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_high_mask2(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-14510, %ax # imm = 0xC752 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mask2(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-14510, %ax # imm = 0xC752 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,7,5,5,5,8,9,10,11,15,13,13,13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_16xi16_perm_low_mask3(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] +; CHECK-NEXT: retq + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_low_mask3(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-16563, %ax # imm = 0xBF4D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret 
<16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mask3(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-16563, %ax # imm = 0xBF4D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,3,2,4,5,6,7,11,10,11,10,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_high_mask4(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12298, %ax # imm = 0x300A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mask4(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $12298, %ax # imm = 0x300A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,4,7,8,9,10,11,14,15,12,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_low_mask5(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-29565, %ax # imm = 0x8C83 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x 
i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mask5(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-29565, %ax # imm = 0x8C83 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,3,3,0,4,5,6,7,11,11,11,8,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_16xi16_perm_high_mask6(<16 x i16> %vec) { +; CHECK-LABEL: test_16xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] +; CHECK-NEXT: retq + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_high_mask6(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27779, %ax # imm = 0x6C83 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm1 {%k1} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mask6(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movw $27779, %ax # imm = 0x6C83 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = ymm0[0,1,2,3,6,7,6,5,8,9,10,11,14,15,14,13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + 
ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_low_mask7(<16 x i16> %vec, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3292, %ax # imm = 0xF324 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm1 {%k1} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mask7(<16 x i16> %vec) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3292, %ax # imm = 0xF324 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = ymm0[3,2,1,2,4,5,6,7,11,10,9,10,12,13,14,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} +define <16 x i16> @test_16xi16_perm_high_mem_mask0(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_high_mem_mask0(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12838, %ax # imm = 0xCDDA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x 
i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask0(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12838, %ax # imm = 0xCDDA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,4,7,8,9,10,11,13,14,12,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_low_mem_mask1(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $14962, %ax # imm = 0x3A72 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask1(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $14962, %ax # imm = 0x3A72 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,0,4,5,6,7,9,11,11,8,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_high_mem_mask2(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $1029, %ax # imm = 0x405 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = 
mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask2(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $1029, %ax # imm = 0x405 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,5,6,5,6,8,9,10,11,13,14,13,14] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_16xi16_perm_low_mem_mask3(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_low_mem_mask3(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30862, %ax # imm = 0x8772 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask3(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30862, %ax # imm = 0x8772 +; CHECK-NEXT: kmovd %eax, %k1 +; 
CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,0,4,5,6,7,11,10,11,8,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_high_mem_mask4(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3845, %ax # imm = 0xF0FB +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask4(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-3845, %ax # imm = 0xF0FB +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,7,7,6,7,8,9,10,11,15,15,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_low_mem_mask5(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-20955, %ax # imm = 0xAE25 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + 
+define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask5(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-20955, %ax # imm = 0xAE25 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[1,3,3,2,4,5,6,7,9,11,11,10,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_16xi16_perm_high_mem_mask6(<16 x i16>* %vp) { +; CHECK-LABEL: test_16xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %res = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + ret <16 x i16> %res +} +define <16 x i16> @test_masked_16xi16_perm_high_mem_mask6(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-24190, %ax # imm = 0xA182 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_high_mem_mask6(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-24190, %ax # imm = 0xA182 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} ymm0 {%k1} {z} = mem[0,1,2,3,4,4,4,5,8,9,10,11,12,12,12,13] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> 
%shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_16xi16_perm_low_mem_mask7(<16 x i16>* %vp, <16 x i16> %vec2) { +; CHECK-LABEL: test_masked_16xi16_perm_low_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-24392, %ax # imm = 0xA0B8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> %vec2 + ret <16 x i16> %res +} + +define <16 x i16> @test_masked_z_16xi16_perm_low_mem_mask7(<16 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_16xi16_perm_low_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-24392, %ax # imm = 0xA0B8 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} ymm0 {%k1} {z} = mem[3,1,3,2,4,5,6,7,11,9,11,10,12,13,14,15] +; CHECK-NEXT: retq + %vec = load <16 x i16>, <16 x i16>* %vp + %shuf = shufflevector <16 x i16> %vec, <16 x i16> undef, <16 x i32> + %res = select <16 x i1> , <16 x i16> %shuf, <16 x i16> zeroinitializer + ret <16 x i16> %res +} + +define <32 x i16> @test_32xi16_perm_high_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_high_mask0(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1671867126, %eax # imm = 0x63A6AAF6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: 
retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mask0(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1671867126, %eax # imm = 0x63A6AAF6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,5,6,4,8,9,10,11,12,13,14,12,16,17,18,19,20,21,22,20,24,25,26,27,28,29,30,28] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_low_mask1(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-514766311, %eax # imm = 0xE1514A19 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mask1(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-514766311, %eax # imm = 0xE1514A19 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,0,0,4,5,6,7,10,9,8,8,12,13,14,15,18,17,16,16,20,21,22,23,26,25,24,24,28,29,30,31] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_high_mask2(<32 x i16> %vec, <32 x i16> %vec2) { +; 
CHECK-LABEL: test_masked_32xi16_perm_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $165000787, %eax # imm = 0x9D5B653 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mask2(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $165000787, %eax # imm = 0x9D5B653 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,6,4,7,8,9,10,11,12,14,12,15,16,17,18,19,20,22,20,23,24,25,26,27,28,30,28,31] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_32xi16_perm_low_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_low_mask3(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1998504075, %eax # imm = 0x771EC08B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x 
i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mask3(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1998504075, %eax # imm = 0x771EC08B +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,3,1,3,4,5,6,7,11,11,9,11,12,13,14,15,19,19,17,19,20,21,22,23,27,27,25,27,28,29,30,31] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_high_mask4(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-730778639, %eax # imm = 0xD47133F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mask4(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-730778639, %eax # imm = 0xD47133F1 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,7,7,5,6,8,9,10,11,15,15,13,14,16,17,18,19,23,23,21,22,24,25,26,27,31,31,29,30] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_low_mask5(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movl $544659762, %eax # imm = 
0x2076D932 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mask5(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: movl $544659762, %eax # imm = 0x2076D932 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[2,1,1,0,4,5,6,7,10,9,9,8,12,13,14,15,18,17,17,16,20,21,22,23,26,25,25,24,28,29,30,31] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_32xi16_perm_high_mask6(<32 x i16> %vec) { +; CHECK-LABEL: test_32xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] +; CHECK-NEXT: retq + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_high_mask6(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1243446456, %eax # imm = 0xB5E28348 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm1 {%k1} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> 
@test_masked_z_32xi16_perm_high_mask6(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1243446456, %eax # imm = 0xB5E28348 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = zmm0[0,1,2,3,4,4,5,6,8,9,10,11,12,12,13,14,16,17,18,19,20,20,21,22,24,25,26,27,28,28,29,30] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_low_mask7(<32 x i16> %vec, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1409246810, %eax # imm = 0x53FF665A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm1 {%k1} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mask7(<32 x i16> %vec) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1409246810, %eax # imm = 0x53FF665A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = zmm0[3,0,3,0,4,5,6,7,11,8,11,8,12,13,14,15,19,16,19,16,20,21,22,23,27,24,27,24,28,29,30,31] +; CHECK-NEXT: retq + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} +define <32 x i16> @test_32xi16_perm_high_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] +; CHECK-NEXT: 
retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_high_mem_mask0(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1911488810, %eax # imm = 0x8E10FED6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask0(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1911488810, %eax # imm = 0x8E10FED6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,5,6,8,9,10,11,15,12,13,14,16,17,18,19,23,20,21,22,24,25,26,27,31,28,29,30] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_32xi16_perm_low_mem_mask1(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1098876619, %eax # imm = 0xBE807935 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + 
+define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask1(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1098876619, %eax # imm = 0xBE807935 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[1,1,3,3,4,5,6,7,9,9,11,11,12,13,14,15,17,17,19,19,20,21,22,23,25,25,27,27,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_32xi16_perm_high_mem_mask2(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1583892148, %eax # imm = 0xA197B94C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask2(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1583892148, %eax # imm = 0xA197B94C +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,4,7,6,4,8,9,10,11,12,15,14,12,16,17,18,19,20,23,22,20,24,25,26,27,28,31,30,28] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_32xi16_perm_low_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; 
CHECK-NEXT: vpshuflw {{.*#+}} zmm0 = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_low_mem_mask3(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-216128444, %eax # imm = 0xF31E2444 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask3(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-216128444, %eax # imm = 0xF31E2444 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[2,2,0,3,4,5,6,7,10,10,8,11,12,13,14,15,18,18,16,19,20,21,22,23,26,26,24,27,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_32xi16_perm_high_mem_mask4(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1480468153, %eax # imm = 0x583E26B9 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector 
<32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask4(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask4: +; CHECK: # BB#0: +; CHECK-NEXT: movl $1480468153, %eax # imm = 0x583E26B9 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,7,4,6,5,8,9,10,11,15,12,14,13,16,17,18,19,23,20,22,21,24,25,26,27,31,28,30,29] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_32xi16_perm_low_mem_mask5(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} zmm1 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15] +; CHECK-NEXT: movl $-1778617447, %eax # imm = 0x95FC7399 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %zmm1, %zmm0 {%k1} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask5(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask5: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 = mem[0,0,2,3,4,4,6,7,8,8,10,11,12,12,14,15] +; CHECK-NEXT: movl $-1778617447, %eax # imm = 0x95FC7399 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vmovdqu16 %zmm0, %zmm0 {%k1} {z} +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> 
@test_32xi16_perm_high_mem_mask6(<32 x i16>* %vp) { +; CHECK-LABEL: test_32xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %res = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + ret <32 x i16> %res +} +define <32 x i16> @test_masked_32xi16_perm_high_mem_mask6(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movl $355619267, %eax # imm = 0x153251C3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_high_mem_mask6(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_high_mem_mask6: +; CHECK: # BB#0: +; CHECK-NEXT: movl $355619267, %eax # imm = 0x153251C3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufhw {{.*#+}} zmm0 {%k1} {z} = mem[0,1,2,3,6,5,6,6,8,9,10,11,14,13,14,14,16,17,18,19,22,21,22,22,24,25,26,27,30,29,30,30] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_32xi16_perm_low_mem_mask7(<32 x i16>* %vp, <32 x i16> %vec2) { +; CHECK-LABEL: test_masked_32xi16_perm_low_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1890659259, %eax # imm = 0x8F4ED445 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} = 
mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> %vec2 + ret <32 x i16> %res +} + +define <32 x i16> @test_masked_z_32xi16_perm_low_mem_mask7(<32 x i16>* %vp) { +; CHECK-LABEL: test_masked_z_32xi16_perm_low_mem_mask7: +; CHECK: # BB#0: +; CHECK-NEXT: movl $-1890659259, %eax # imm = 0x8F4ED445 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshuflw {{.*#+}} zmm0 {%k1} {z} = mem[3,1,3,0,4,5,6,7,11,9,11,8,12,13,14,15,19,17,19,16,20,21,22,23,27,25,27,24,28,29,30,31] +; CHECK-NEXT: retq + %vec = load <32 x i16>, <32 x i16>* %vp + %shuf = shufflevector <32 x i16> %vec, <32 x i16> undef, <32 x i32> + %res = select <32 x i1> , <32 x i16> %shuf, <32 x i16> zeroinitializer + ret <32 x i16> %res +} + +define <4 x i32> @test_4xi32_perm_mask0(<4 x i32> %vec) { +; CHECK-LABEL: test_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,3,0] +; CHECK-NEXT: retq + %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + ret <4 x i32> %res +} +define <4 x i32> @test_masked_4xi32_perm_mask0(<4 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[2,3,3,0] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_4xi32_perm_mask0(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_4xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[2,3,3,0] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, 
<4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_4xi32_perm_mask1(<4 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_4xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,0,2,0] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_4xi32_perm_mask1(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_4xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,0,2,0] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_masked_4xi32_perm_mask2(<4 x i32> %vec, <4 x i32> %vec2) { +; CHECK-LABEL: test_masked_4xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm1 {%k1} = xmm0[3,0,1,0] +; CHECK-NEXT: vmovdqa %xmm1, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> %vec2 + ret <4 x i32> %res +} + +define <4 x i32> @test_masked_z_4xi32_perm_mask2(<4 x i32> %vec) { +; CHECK-LABEL: test_masked_z_4xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[3,0,1,0] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> + %res = select <4 x i1> , <4 x i32> %shuf, <4 x i32> zeroinitializer + ret <4 x i32> %res +} +define <4 x i32> @test_4xi32_perm_mask3(<4 x i32> 
%vec) {
+; CHECK-LABEL: test_4xi32_perm_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,0,3]
+; CHECK-NEXT:    retq
+  %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
+  ret <4 x i32> %res
+}
+define <4 x i32> @test_masked_4xi32_perm_mask3(<4 x i32> %vec, <4 x i32> %vec2) {
+; CHECK-LABEL: test_masked_4xi32_perm_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $10, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm1 {%k1} = xmm0[1,1,0,3]
+; CHECK-NEXT:    vmovdqa %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
+  %res = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> %shuf, <4 x i32> %vec2
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_masked_z_4xi32_perm_mask3(<4 x i32> %vec) {
+; CHECK-LABEL: test_masked_z_4xi32_perm_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $10, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} {z} = xmm0[1,1,0,3]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 1, i32 0, i32 3>
+  %res = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> %shuf, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+define <4 x i32> @test_4xi32_perm_mem_mask0(<4 x i32>* %vp) {
+; CHECK-LABEL: test_4xi32_perm_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = mem[0,1,3,3]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 3>
+  ret <4 x i32> %res
+}
+define <4 x i32> @test_masked_4xi32_perm_mem_mask0(<4 x i32>* %vp, <4 x i32> %vec2) {
+; CHECK-LABEL: test_masked_4xi32_perm_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $10, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} = mem[0,1,3,3]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 3>
+  %res = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> %shuf, <4 x i32> %vec2
+  ret <4 x i32> %res
+}
+
+define <4 x i32>
@test_masked_z_4xi32_perm_mem_mask0(<4 x i32>* %vp) {
+; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $10, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,1,3,3]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 3, i32 3>
+  %res = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x i32> %shuf, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_masked_4xi32_perm_mem_mask1(<4 x i32>* %vp, <4 x i32> %vec2) {
+; CHECK-LABEL: test_masked_4xi32_perm_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $5, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} = mem[2,2,3,1]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 1>
+  %res = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %shuf, <4 x i32> %vec2
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_masked_z_4xi32_perm_mem_mask1(<4 x i32>* %vp) {
+; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $5, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[2,2,3,1]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 1>
+  %res = select <4 x i1> <i1 true, i1 false, i1 true, i1 false>, <4 x i32> %shuf, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_masked_4xi32_perm_mem_mask2(<4 x i32>* %vp, <4 x i32> %vec2) {
+; CHECK-LABEL: test_masked_4xi32_perm_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $11, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} = mem[0,3,0,1]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 0, i32 1>
+  %res = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %shuf, <4 x i32> %vec2
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_masked_z_4xi32_perm_mem_mask2(<4 x i32>* %vp) {
+; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $11, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[0,3,0,1]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 0, i32 3, i32 0, i32 1>
+  %res = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x i32> %shuf, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_4xi32_perm_mem_mask3(<4 x i32>* %vp) {
+; CHECK-LABEL: test_4xi32_perm_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 = mem[1,0,1,0]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %res = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
+  ret <4 x i32> %res
+}
+define <4 x i32> @test_masked_4xi32_perm_mem_mask3(<4 x i32>* %vp, <4 x i32> %vec2) {
+; CHECK-LABEL: test_masked_4xi32_perm_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} = mem[1,0,1,0]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
+  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %shuf, <4 x i32> %vec2
+  ret <4 x i32> %res
+}
+
+define <4 x i32> @test_masked_z_4xi32_perm_mem_mask3(<4 x i32>* %vp) {
+; CHECK-LABEL: test_masked_z_4xi32_perm_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vpshufd {{.*#+}} xmm0 {%k1} {z} = mem[1,0,1,0]
+; CHECK-NEXT:    retq
+  %vec = load <4 x i32>, <4 x i32>* %vp
+  %shuf = shufflevector <4 x i32> %vec, <4 x i32> undef, <4 x i32> <i32 1, i32 0, i32 1, i32 0>
+  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x i32> %shuf, <4 x i32> zeroinitializer
+  ret <4 x i32> %res
+}
+
+define <8 x i32> @test_8xi32_perm_mask0(<8 x i32> %vec) {
+; CHECK-LABEL: test_8xi32_perm_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[2,3,1,0,6,7,5,4]
+; CHECK-NEXT:    retq
+  %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 1, i32 0, i32 6, i32 7, i32 5, i32 4>
+  ret <8 x i32> %res
+}
+define <8 x i32> @test_masked_8xi32_perm_mask0(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-99, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[2,3,1,0,6,7,5,4] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask0(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-99, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[2,3,1,0,6,7,5,4] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mask1(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-90, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[0,3,3,3,4,7,7,7] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask1(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-90, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[0,3,3,3,4,7,7,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mask2(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask2: +; 
CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,2,0,3,5,6,4,7] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask2(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,2,0,3,5,6,4,7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} +define <8 x i32> @test_8xi32_perm_mask3(<8 x i32> %vec) { +; CHECK-LABEL: test_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,1,0,5,7,5,4] +; CHECK-NEXT: retq + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mask3(<8 x i32> %vec, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm1 {%k1} = ymm0[1,3,1,0,5,7,5,4] +; CHECK-NEXT: vmovdqa %ymm1, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mask3(<8 x i32> %vec) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = ymm0[1,3,1,0,5,7,5,4] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x 
i32> %res +} +define <8 x i32> @test_8xi32_perm_mem_mask0(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = mem[1,0,2,0,5,4,6,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mem_mask0(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-25, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[1,0,2,0,5,4,6,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask0(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-25, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[1,0,2,0,5,4,6,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_8xi32_perm_mem_mask1(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-97, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[0,3,2,0,4,7,6,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask1(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb 
$-97, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[0,3,2,0,4,7,6,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_8xi32_perm_mem_mask2(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,3,1,7,6,7,5] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask2(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,3,1,7,6,7,5] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <8 x i32> @test_8xi32_perm_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 = mem[3,2,0,0,7,6,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %res = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + ret <8 x i32> %res +} +define <8 x i32> @test_masked_8xi32_perm_mem_mask3(<8 x i32>* %vp, <8 x i32> %vec2) { +; CHECK-LABEL: test_masked_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} = mem[3,2,0,0,7,6,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + 
%shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> %vec2 + ret <8 x i32> %res +} + +define <8 x i32> @test_masked_z_8xi32_perm_mem_mask3(<8 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_8xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} ymm0 {%k1} {z} = mem[3,2,0,0,7,6,4,4] +; CHECK-NEXT: retq + %vec = load <8 x i32>, <8 x i32>* %vp + %shuf = shufflevector <8 x i32> %vec, <8 x i32> undef, <8 x i32> + %res = select <8 x i1> , <8 x i32> %shuf, <8 x i32> zeroinitializer + ret <8 x i32> %res +} + +define <16 x i32> @test_16xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask0(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18453, %ax # imm = 0x4815 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask0(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $18453, %ax # imm = 0x4815 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,1,3,0,7,5,7,4,11,9,11,8,15,13,15,12] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} 
+define <16 x i32> @test_masked_16xi32_perm_mask1(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $11142, %ax # imm = 0x2B86 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask1(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $11142, %ax # imm = 0x2B86 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[2,0,3,0,6,4,7,4,10,8,11,8,14,12,15,12] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask2(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $15610, %ax # imm = 0x3CFA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask2(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $15610, %ax # imm = 0x3CFA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[1,3,3,0,5,7,7,4,9,11,11,8,13,15,15,12] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , 
<16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_16xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] +; CHECK-NEXT: retq + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mask3(<16 x i32> %vec, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $14814, %ax # imm = 0x39DE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm1 {%k1} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] +; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mask3(<16 x i32> %vec) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $14814, %ax # imm = 0x39DE +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = zmm0[3,2,0,3,7,6,4,7,11,10,8,11,15,14,12,15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} +define <16 x i32> @test_16xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mem_mask0(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $7334, %ax # imm = 0x1CA6 +; 
CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask0(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $7334, %ax # imm = 0x1CA6 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,1,3,5,4,5,7,9,8,9,11,13,12,13,15] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_16xi32_perm_mem_mask1(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-25463, %ax # imm = 0x9C89 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask1(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-25463, %ax # imm = 0x9C89 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[1,0,0,2,5,4,4,6,9,8,8,10,13,12,12,14] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> 
@test_masked_16xi32_perm_mem_mask2(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-14529, %ax # imm = 0xC73F +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define <16 x i32> @test_masked_z_16xi32_perm_mem_mask2(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-14529, %ax # imm = 0xC73F +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[2,0,1,2,6,4,5,6,10,8,9,10,14,12,13,14] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + +define <16 x i32> @test_16xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_16xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %res = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + ret <16 x i32> %res +} +define <16 x i32> @test_masked_16xi32_perm_mem_mask3(<16 x i32>* %vp, <16 x i32> %vec2) { +; CHECK-LABEL: test_masked_16xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-21392, %ax # imm = 0xAC70 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> %vec2 + ret <16 x i32> %res +} + +define 
<16 x i32> @test_masked_z_16xi32_perm_mem_mask3(<16 x i32>* %vp) { +; CHECK-LABEL: test_masked_z_16xi32_perm_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-21392, %ax # imm = 0xAC70 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vpshufd {{.*#+}} zmm0 {%k1} {z} = mem[3,1,1,1,7,5,5,5,11,9,9,9,15,13,13,13] +; CHECK-NEXT: retq + %vec = load <16 x i32>, <16 x i32>* %vp + %shuf = shufflevector <16 x i32> %vec, <16 x i32> undef, <16 x i32> + %res = select <16 x i1> , <16 x i32> %shuf, <16 x i32> zeroinitializer + ret <16 x i32> %res +} + diff --git a/llvm/test/CodeGen/X86/avx512-shuffles/unpack.ll b/llvm/test/CodeGen/X86/avx512-shuffles/unpack.ll new file mode 100644 index 000000000000..3e77390fe052 --- /dev/null +++ b/llvm/test/CodeGen/X86/avx512-shuffles/unpack.ll @@ -0,0 +1,2621 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mcpu=skx %s -o - | FileCheck %s + +define <4 x float> @test_4xfloat_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask0(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask0: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_low_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask1(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_low_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> 
@test_4xfloat_zero_masked_unpack_low_mask2(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mask3(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0],xmm0[1],xmm1[1] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) { 
+; CHECK-LABEL: test_4xfloat_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $8, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = 
select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_unpack_low_mem_mask3: +; CHECK: # BB#0: 
+; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_low_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0],xmm0[1],mem[1] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <8 x float> @test_8xfloat_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb 
$122, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $122, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_low_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask1(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-107, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 
x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_low_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-25, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask2(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-25, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-127, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector 
<8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-127, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[4],ymm1[4],ymm0[5],ymm1[5] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $72, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $72, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: vmovaps %ymm1, 
%ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-98, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_low_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_low_mem_mask3(<8 x float> %vec1, <8 x 
float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $64, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[1],mem[1],ymm0[4],mem[4],ymm0[5],mem[5] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <16 x float> @test_16xfloat_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5916, %ax # imm = 0xE8E4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-5916, %ax # imm = 0xE8E4 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = 
zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_low_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-1130, %ax # imm = 0xFB96 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask1(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-1130, %ax # imm = 0xFB96 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_low_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12439, %ax # imm = 0xCF69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = 
zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask2(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12439, %ax # imm = 0xCF69 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6413, %ax # imm = 0xE6F3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x 
float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6413, %ax # imm = 0xE6F3 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[12],zmm1[12],zmm0[13],zmm1[13] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $20326, %ax # imm = 0x4F66 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + 
ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $20326, %ax # imm = 0x4F66 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-17707, %ax # imm = 0xBAD5 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-17707, %ax # imm = 0xBAD5 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = 
shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6631, %ax # imm = 0xE619 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-6631, %ax # imm = 0xE619 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* 
%vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-20711, %ax # imm = 0xAF19 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_low_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-20711, %ax # imm = 0xAF19 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklps {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[1],mem[1],zmm0[4],mem[4],zmm0[5],mem[5],zmm0[8],mem[8],zmm0[9],mem[9],zmm0[12],mem[12],zmm0[13],mem[13] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <2 x double> @test_2xdouble_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2) { +; CHECK-LABEL: test_2xdouble_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],xmm1[0] +; CHECK-NEXT: retq + %res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_masked_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3) { 
+; CHECK-LABEL: test_2xdouble_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0] +; CHECK-NEXT: vmovapd %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask0(<2 x double> %vec1, <2 x double> %vec2) { +; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_masked_unpack_low_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm2 {%k1} = xmm0[0],xmm1[0] +; CHECK-NEXT: vmovapd %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_unpack_low_mask1(<2 x double> %vec1, <2 x double> %vec2) { +; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],xmm1[0] +; CHECK-NEXT: retq + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} +define <2 x double> 
@test_2xdouble_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) { +; CHECK-LABEL: test_2xdouble_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 = xmm0[0],mem[0] +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + ret <2 x double> %res +} +define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) { +; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0] +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_masked_unpack_low_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3) { +; CHECK-LABEL: test_2xdouble_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm1 {%k1} = xmm0[0],mem[0] +; CHECK-NEXT: vmovapd %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = 
shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> %vec3 + ret <2 x double> %res +} + +define <2 x double> @test_2xdouble_zero_masked_unpack_low_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p) { +; CHECK-LABEL: test_2xdouble_zero_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $2, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} xmm0 {%k1} {z} = xmm0[0],mem[0] +; CHECK-NEXT: retq + %vec2 = load <2 x double>, <2 x double>* %vec2p + %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> + %res = select <2 x i1> , <2 x double> %shuf, <2 x double> zeroinitializer + ret <2 x double> %res +} + +define <4 x double> @test_4xdouble_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask0(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: retq + %shuf = 
shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_unpack_low_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask1(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $14, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_unpack_low_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask2(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask2: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $6, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: retq + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm2 {%k1} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: vmovapd %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mask3(<4 x double> %vec1, <4 x double> %vec2) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],ymm1[0],ymm0[2],ymm1[2] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 = 
ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x 
double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $11, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd 
{{.*#+}} ymm0 = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + ret <4 x double> %res +} +define <4 x double> @test_4xdouble_masked_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) { +; CHECK-LABEL: test_4xdouble_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm1 {%k1} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: vmovapd %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> %vec3 + ret <4 x double> %res +} + +define <4 x double> @test_4xdouble_zero_masked_unpack_low_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) { +; CHECK-LABEL: test_4xdouble_zero_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} ymm0 {%k1} {z} = ymm0[0],mem[0],ymm0[2],mem[2] +; CHECK-NEXT: retq + %vec2 = load <4 x double>, <4 x double>* %vec2p + %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x double> %shuf, <4 x double> zeroinitializer + ret <4 x double> %res +} + +define <8 x double> @test_8xdouble_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask0: +; CHECK: # BB#0: +; 
CHECK-NEXT: movb $-73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask0(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-73, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_unpack_low_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask1(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $102, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> 
+ %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_unpack_low_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask2(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-46, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm2 {%k1} = 
zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: vmovapd %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mask3(<8 x double> %vec1, <8 x double> %vec2) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-86, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],zmm1[0],zmm0[2],zmm1[2],zmm0[4],zmm1[4],zmm0[6],zmm1[6] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> 
@test_8xdouble_zero_masked_unpack_low_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $1, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; 
CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-35, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + ret <8 x double> %res +} +define <8 x double> @test_8xdouble_masked_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) { +; CHECK-LABEL: test_8xdouble_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm1 {%k1} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: vmovapd %zmm1, %zmm0 +; CHECK-NEXT: 
retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> %vec3 + ret <8 x double> %res +} + +define <8 x double> @test_8xdouble_zero_masked_unpack_low_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) { +; CHECK-LABEL: test_8xdouble_zero_masked_unpack_low_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $62, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpcklpd {{.*#+}} zmm0 {%k1} {z} = zmm0[0],mem[0],zmm0[2],mem[2],zmm0[4],mem[4],zmm0[6],mem[6] +; CHECK-NEXT: retq + %vec2 = load <8 x double>, <8 x double>* %vec2p + %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x double> %shuf, <8 x double> zeroinitializer + ret <8 x double> %res +} + +define <4 x float> @test_4xfloat_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask0(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: 
vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_high_mask1(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask1(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $12, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_high_mask2(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask2(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: 
test_4xfloat_zero_masked_unpack_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $3, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: retq + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm2 {%k1} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: vmovaps %xmm2, %xmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mask3(<4 x float> %vec1, <4 x float> %vec2) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $7, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],xmm1[2],xmm0[3],xmm1[3] +; CHECK-NEXT: retq + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: 
vunpckhps {{.*#+}} xmm0 = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask0(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $4, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> 
%res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask1(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $13, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask2(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $10, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_unpack_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 = 
xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %res = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + ret <4 x float> %res +} +define <4 x float> @test_4xfloat_masked_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p, <4 x float> %vec3) { +; CHECK-LABEL: test_4xfloat_masked_unpack_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm1 {%k1} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: vmovaps %xmm1, %xmm0 +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> %vec3 + ret <4 x float> %res +} + +define <4 x float> @test_4xfloat_zero_masked_unpack_high_mem_mask3(<4 x float> %vec1, <4 x float>* %vec2p) { +; CHECK-LABEL: test_4xfloat_zero_masked_unpack_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $5, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} xmm0 {%k1} {z} = xmm0[2],mem[2],xmm0[3],mem[3] +; CHECK-NEXT: retq + %vec2 = load <4 x float>, <4 x float>* %vec2p + %shuf = shufflevector <4 x float> %vec1, <4 x float> %vec2, <4 x i32> + %res = select <4 x i1> , <4 x float> %shuf, <4 x float> zeroinitializer + ret <4 x float> %res +} + +define <8 x float> @test_8xfloat_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $21, %al +; CHECK-NEXT: kmovd 
%eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask0(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $21, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_high_mask1(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask1(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $82, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + 
ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_high_mask2(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask2(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-126, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: retq + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-19, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm2 {%k1} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: vmovaps %ymm2, %ymm0 +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> 
%vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mask3(<8 x float> %vec1, <8 x float> %vec2) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-19, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[6],ymm1[6],ymm0[7],ymm1[7] +; CHECK-NEXT: retq + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $28, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask0(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movb $28, %al +; CHECK-NEXT: kmovd %eax, %k1 
+; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask1(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-115, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-76, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: vmovaps %ymm1, 
%ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask2(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-76, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_unpack_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %res = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + ret <8 x float> %res +} +define <8 x float> @test_8xfloat_masked_unpack_high_mem_mask3(<8 x float> %vec1, <8 x float>* %vec2p, <8 x float> %vec3) { +; CHECK-LABEL: test_8xfloat_masked_unpack_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm1 {%k1} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: vmovaps %ymm1, %ymm0 +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> %vec3 + ret <8 x float> %res +} + +define <8 x float> @test_8xfloat_zero_masked_unpack_high_mem_mask3(<8 x float> 
%vec1, <8 x float>* %vec2p) { +; CHECK-LABEL: test_8xfloat_zero_masked_unpack_high_mem_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movb $-116, %al +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} ymm0 {%k1} {z} = ymm0[2],mem[2],ymm0[3],mem[3],ymm0[6],mem[6],ymm0[7],mem[7] +; CHECK-NEXT: retq + %vec2 = load <8 x float>, <8 x float>* %vec2p + %shuf = shufflevector <8 x float> %vec1, <8 x float> %vec2, <8 x i32> + %res = select <8 x i1> , <8 x float> %shuf, <8 x float> zeroinitializer + ret <8 x float> %res +} + +define <16 x float> @test_16xfloat_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12160, %ax # imm = 0xD080 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask0(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-12160, %ax # imm = 0xD080 +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = 
zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_high_mask1(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30129, %ax # imm = 0x8A4F +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask1(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-30129, %ax # imm = 0x8A4F +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_high_mask2(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-2371, %ax # imm = 0xF6BD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = 
zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask2(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask2: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-2371, %ax # imm = 0xF6BD +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: retq + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-26006, %ax # imm = 0x9A6A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm2 {%k1} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: vmovaps %zmm2, %zmm0 +; CHECK-NEXT: retq + %shuf = 
shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_high_mask3(<16 x float> %vec1, <16 x float> %vec2) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mask3: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-26006, %ax # imm = 0x9A6A +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[14],zmm1[14],zmm0[15],zmm1[15] +; CHECK-NEXT: retq + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + ret <16 x float> %res +} +define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-27027, %ax # imm = 0x966D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 
x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask0(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask0: +; CHECK: # BB#0: +; CHECK-NEXT: movw $-27027, %ax # imm = 0x966D +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) { +; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $29162, %ax # imm = 0x71EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] +; CHECK-NEXT: vmovaps %zmm1, %zmm0 +; CHECK-NEXT: retq + %vec2 = load <16 x float>, <16 x float>* %vec2p + %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> + %res = select <16 x i1> , <16 x float> %shuf, <16 x float> %vec3 + ret <16 x float> %res +} + +define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask1(<16 x float> %vec1, <16 x float>* %vec2p) { +; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask1: +; CHECK: # BB#0: +; CHECK-NEXT: movw $29162, %ax # imm = 0x71EA +; CHECK-NEXT: kmovd %eax, %k1 +; CHECK-NEXT: vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15] +; CHECK-NEXT: retq + %vec2 = load <16 
x float>, <16 x float>* %vec2p
+  %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+; FIXME(review): the <16 x i1> constant of this select was lost; it must mirror,
+; bit for bit, the movw immediate of the masked mem_mask1 variant above (outside
+; this chunk) — restore it from that CHECK line before committing.
+  %res = select <16 x i1> , <16 x float> %shuf, <16 x float> zeroinitializer
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) {
+; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movw $-26458, %ax # imm = 0x98A6
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <16 x float>, <16 x float>* %vec2p
+  %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+  %res = select <16 x i1> <i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <16 x float> %shuf, <16 x float> %vec3
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask2(<16 x float> %vec1, <16 x float>* %vec2p) {
+; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movw $-26458, %ax # imm = 0x98A6
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
+; CHECK-NEXT:    retq
+  %vec2 = load <16 x float>, <16 x float>* %vec2p
+  %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+  %res = select <16 x i1> <i1 false, i1 true, i1 true, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 true, i1 false, i1 false, i1 true>, <16 x float> %shuf, <16 x float> zeroinitializer
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_16xfloat_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
+; CHECK-LABEL: test_16xfloat_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhps {{.*#+}} zmm0 = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
+; CHECK-NEXT:    retq
+  %vec2 = load <16 x float>, <16 x float>* %vec2p
+  %res = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+  ret <16 x float> %res
+}
+define <16 x float> @test_16xfloat_masked_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p, <16 x float> %vec3) {
+; CHECK-LABEL: test_16xfloat_masked_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movw $25225, %ax # imm = 0x6289
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhps {{.*#+}} zmm1 {%k1} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
+; CHECK-NEXT:    vmovaps %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <16 x float>, <16 x float>* %vec2p
+  %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+  %res = select <16 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 true, i1 false>, <16 x float> %shuf, <16 x float> %vec3
+  ret <16 x float> %res
+}
+
+define <16 x float> @test_16xfloat_zero_masked_unpack_high_mem_mask3(<16 x float> %vec1, <16 x float>* %vec2p) {
+; CHECK-LABEL: test_16xfloat_zero_masked_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movw $25225, %ax # imm = 0x6289
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhps {{.*#+}} zmm0 {%k1} {z} = zmm0[2],mem[2],zmm0[3],mem[3],zmm0[6],mem[6],zmm0[7],mem[7],zmm0[10],mem[10],zmm0[11],mem[11],zmm0[14],mem[14],zmm0[15],mem[15]
+; CHECK-NEXT:    retq
+  %vec2 = load <16 x float>, <16 x float>* %vec2p
+  %shuf = shufflevector <16 x float> %vec1, <16 x float> %vec2, <16 x i32> <i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
+  %res = select <16 x i1> <i1 true, i1 false, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true, i1 true, i1 false>, <16 x float> %shuf, <16 x float> zeroinitializer
+  ret <16 x float> %res
+}
+
+define <2 x double> @test_2xdouble_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2) {
+; CHECK-LABEL: test_2xdouble_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; CHECK-NEXT:    retq
+  %res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  ret <2 x double> %res
+}
+define <2 x double> @test_2xdouble_masked_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3) {
+; CHECK-LABEL: test_2xdouble_masked_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $2, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1]
+; CHECK-NEXT:    vmovapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 false, i1 true>, <2 x double> %shuf, <2 x double> %vec3
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask0(<2 x double> %vec1, <2 x double> %vec2) {
+; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $2, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 false, i1 true>, <2 x double> %shuf, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+define <2 x double> @test_2xdouble_masked_unpack_high_mask1(<2 x double> %vec1, <2 x double> %vec2, <2 x double> %vec3) {
+; CHECK-LABEL: test_2xdouble_masked_unpack_high_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm2 {%k1} = xmm0[1],xmm1[1]
+; CHECK-NEXT:    vmovapd %xmm2, %xmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 true, i1 false>, <2 x double> %shuf, <2 x double> %vec3
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_2xdouble_zero_masked_unpack_high_mask1(<2 x double> %vec1, <2 x double> %vec2) {
+; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],xmm1[1]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 true, i1 false>, <2 x double> %shuf, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+define <2 x double> @test_2xdouble_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
+; CHECK-LABEL: test_2xdouble_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 = xmm0[1],mem[1]
+; CHECK-NEXT:    retq
+  %vec2 = load <2 x double>, <2 x double>* %vec2p
+  %res = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  ret <2 x double> %res
+}
+define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3) {
+; CHECK-LABEL: test_2xdouble_masked_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1]
+; CHECK-NEXT:    vmovapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <2 x double>, <2 x double>* %vec2p
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 true, i1 false>, <2 x double> %shuf, <2 x double> %vec3
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask0(<2 x double> %vec1, <2 x double>* %vec2p) {
+; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1]
+; CHECK-NEXT:    retq
+  %vec2 = load <2 x double>, <2 x double>* %vec2p
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 true, i1 false>, <2 x double> %shuf, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_2xdouble_masked_unpack_high_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p, <2 x double> %vec3) {
+; CHECK-LABEL: test_2xdouble_masked_unpack_high_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $2, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm1 {%k1} = xmm0[1],mem[1]
+; CHECK-NEXT:    vmovapd %xmm1, %xmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <2 x double>, <2 x double>* %vec2p
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 false, i1 true>, <2 x double> %shuf, <2 x double> %vec3
+  ret <2 x double> %res
+}
+
+define <2 x double> @test_2xdouble_zero_masked_unpack_high_mem_mask1(<2 x double> %vec1, <2 x double>* %vec2p) {
+; CHECK-LABEL: test_2xdouble_zero_masked_unpack_high_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $2, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} xmm0 {%k1} {z} = xmm0[1],mem[1]
+; CHECK-NEXT:    retq
+  %vec2 = load <2 x double>, <2 x double>* %vec2p
+  %shuf = shufflevector <2 x double> %vec1, <2 x double> %vec2, <2 x i32> <i32 1, i32 3>
+  %res = select <2 x i1> <i1 false, i1 true>, <2 x double> %shuf, <2 x double> zeroinitializer
+  ret <2 x double> %res
+}
+
+define <4 x double> @test_4xdouble_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2) {
+; CHECK-LABEL: test_4xdouble_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
+  %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_masked_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $9, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    vmovapd %ymm2, %ymm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask0(<4 x double> %vec1, <4 x double> %vec2) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $9, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 true>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_masked_unpack_high_mask1(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $14, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    vmovapd %ymm2, %ymm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask1(<4 x double> %vec1, <4 x double> %vec2) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $14, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 true, i1 true, i1 true>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_masked_unpack_high_mask2(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $6, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    vmovapd %ymm2, %ymm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask2(<4 x double> %vec1, <4 x double> %vec2) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $6, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 true, i1 true, i1 false>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2) {
+; CHECK-LABEL: test_4xdouble_unpack_high_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
+  %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_masked_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm2 {%k1} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    vmovapd %ymm2, %ymm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mask3(<4 x double> %vec1, <4 x double> %vec2) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $1, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 false, i1 false, i1 false>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
+; CHECK-LABEL: test_4xdouble_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $11, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    vmovapd %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask0(<4 x double> %vec1, <4 x double>* %vec2p) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $11, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 true, i1 false, i1 true>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $12, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    vmovapd %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask1(<4 x double> %vec1, <4 x double>* %vec2p) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $12, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 false, i1 true, i1 true>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $13, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    vmovapd %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask2(<4 x double> %vec1, <4 x double>* %vec2p) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $13, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 true, i1 false, i1 true, i1 true>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
+; CHECK-LABEL: test_4xdouble_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %res = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  ret <4 x double> %res
+}
+define <4 x double> @test_4xdouble_masked_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p, <4 x double> %vec3) {
+; CHECK-LABEL: test_4xdouble_masked_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $10, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm1 {%k1} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    vmovapd %ymm1, %ymm0
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x double> %shuf, <4 x double> %vec3
+  ret <4 x double> %res
+}
+
+define <4 x double> @test_4xdouble_zero_masked_unpack_high_mem_mask3(<4 x double> %vec1, <4 x double>* %vec2p) {
+; CHECK-LABEL: test_4xdouble_zero_masked_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $10, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} ymm0 {%k1} {z} = ymm0[1],mem[1],ymm0[3],mem[3]
+; CHECK-NEXT:    retq
+  %vec2 = load <4 x double>, <4 x double>* %vec2p
+  %shuf = shufflevector <4 x double> %vec1, <4 x double> %vec2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+  %res = select <4 x i1> <i1 false, i1 true, i1 false, i1 true>, <4 x double> %shuf, <4 x double> zeroinitializer
+  ret <4 x double> %res
+}
+
+define <8 x double> @test_8xdouble_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2) {
+; CHECK-LABEL: test_8xdouble_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    retq
+  %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_masked_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-27, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 true>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask0(<8 x double> %vec1, <8 x double> %vec2) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-27, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 true>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_masked_unpack_high_mask1(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-21, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask1(<8 x double> %vec1, <8 x double> %vec2) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-21, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 true, i1 false, i1 true, i1 false, i1 true, i1 true, i1 true>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_masked_unpack_high_mask2(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-118, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask2(<8 x double> %vec1, <8 x double> %vec2) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-118, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 true, i1 false, i1 true, i1 false, i1 false, i1 false, i1 true>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2) {
+; CHECK-LABEL: test_8xdouble_unpack_high_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    retq
+  %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_masked_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $100, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm2 {%k1} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    vmovapd %zmm2, %zmm0
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mask3(<8 x double> %vec1, <8 x double> %vec2) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $100, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],zmm1[1],zmm0[3],zmm1[3],zmm0[5],zmm1[5],zmm0[7],zmm1[7]
+; CHECK-NEXT:    retq
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 false, i1 true, i1 false, i1 false, i1 true, i1 true, i1 false>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
+; CHECK-LABEL: test_8xdouble_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-76, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    vmovapd %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask0(<8 x double> %vec1, <8 x double>* %vec2p) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask0:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-76, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 false, i1 true, i1 false, i1 true, i1 true, i1 false, i1 true>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $71, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    vmovapd %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask1(<8 x double> %vec1, <8 x double>* %vec2p) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask1:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $71, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 true, i1 true, i1 false, i1 false, i1 false, i1 true, i1 false>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-49, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    vmovapd %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask2(<8 x double> %vec1, <8 x double>* %vec2p) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask2:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-49, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 true, i1 true, i1 true, i1 true, i1 false, i1 false, i1 true, i1 true>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
+; CHECK-LABEL: test_8xdouble_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %res = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  ret <8 x double> %res
+}
+define <8 x double> @test_8xdouble_masked_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p, <8 x double> %vec3) {
+; CHECK-LABEL: test_8xdouble_masked_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-40, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm1 {%k1} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    vmovapd %zmm1, %zmm0
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true>, <8 x double> %shuf, <8 x double> %vec3
+  ret <8 x double> %res
+}
+
+define <8 x double> @test_8xdouble_zero_masked_unpack_high_mem_mask3(<8 x double> %vec1, <8 x double>* %vec2p) {
+; CHECK-LABEL: test_8xdouble_zero_masked_unpack_high_mem_mask3:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    movb $-40, %al
+; CHECK-NEXT:    kmovd %eax, %k1
+; CHECK-NEXT:    vunpckhpd {{.*#+}} zmm0 {%k1} {z} = zmm0[1],mem[1],zmm0[3],mem[3],zmm0[5],mem[5],zmm0[7],mem[7]
+; CHECK-NEXT:    retq
+  %vec2 = load <8 x double>, <8 x double>* %vec2p
+  %shuf = shufflevector <8 x double> %vec1, <8 x double> %vec2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+  %res = select <8 x i1> <i1 false, i1 false, i1 false, i1 true, i1 true, i1 false, i1 true, i1 true>, <8 x double> %shuf, <8 x double> zeroinitializer
+  ret <8 x double> %res
+}
+