From d27386a9edfa0d1c33bf6bb4ba704ba1e4bee7c7 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Fri, 25 Aug 2017 23:34:59 +0000
Subject: [PATCH] [AVX512] Add patterns to use masked moves to implement
 masked extract_subvector of the lowest subvector.

This only supports 32- and 64-bit element sizes for now, but we could
probably do 16- and 8-bit elements with BWI.

llvm-svn: 311821
---
 llvm/lib/Target/X86/X86InstrAVX512.td         | 133 ++++++++++++++++++
 .../test/CodeGen/X86/vector-shuffle-masked.ll |  60 ++++----
 2 files changed, 157 insertions(+), 36 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index 8f925e4ca55b..f73716e556fb 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -3739,6 +3739,139 @@ let Predicates = [HasVLX] in {
             (VMOVDQU32Z256mr addr:$dst, (v32i8 (EXTRACT_SUBREG VR512:$src,sub_ymm)))>;
 }
 
+let Predicates = [HasVLX] in {
+// A masked extract from the first 128-bits of a 256-bit vector can be
+// implemented with masked move.
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4i64 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2i64 (VMOVDQA64Z128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i32 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4i32 (VMOVDQA32Z128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4f64 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2f64 (VMOVAPDZ128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f32 VR256X:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4f32 (VMOVAPSZ128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
+
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4i64 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2i64 (VMOVDQA64Z128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4i64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i32 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4i32 (VMOVDQA32Z128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v4f64 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2f64 (VMOVAPDZ128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v4f64 VR256X:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f32 VR256X:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4f32 (VMOVAPSZ128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f32 VR256X:$src), sub_xmm)))>;
+
+// A masked extract from the first 128-bits of a 512-bit vector can be
+// implemented with masked move.
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2i64 (VMOVDQA64Z128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4i32 (VMOVDQA32Z128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v2f64 (VMOVAPDZ128rrk VR128X:$src0, VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          VR128X:$src0)),
+          (v4f32 (VMOVAPSZ128rrk VR128X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
+
+def : Pat<(v2i64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2i64 (VMOVDQA64Z128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4i32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4i32 (VMOVDQA32Z128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_xmm)))>;
+def : Pat<(v2f64 (vselect VK2WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v2f64 (VMOVAPDZ128rrkz VK2WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_xmm)))>;
+def : Pat<(v4f32 (vselect VK4WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v4i32 immAllZerosV)))),
+          (v4f32 (VMOVAPSZ128rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_xmm)))>;
+
+// A masked extract from the first 256-bits of a 512-bit vector can be
+// implemented with masked move.
+def : Pat<(v4i64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v4i64 (VMOVDQA64Z256rrk VR256X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8i32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v8i32 (VMOVDQA32Z256rrk VR256X:$src0, VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)))>;
+def : Pat<(v4f64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v4f64 (VMOVAPDZ256rrk VR256X:$src0, VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8f32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          VR256X:$src0)),
+          (v8f32 (VMOVAPSZ256rrk VR256X:$src0, VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)))>;
+
+def : Pat<(v4i64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8i64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v4i64 (VMOVDQA64Z256rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8i64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8i32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16i32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v8i32 (VMOVDQA32Z256rrkz VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16i32 VR512:$src), sub_ymm)))>;
+def : Pat<(v4f64 (vselect VK4WM:$mask,
+                          (extract_subvector (v8f64 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v4f64 (VMOVAPDZ256rrkz VK4WM:$mask,
+                  (EXTRACT_SUBREG (v8f64 VR512:$src), sub_ymm)))>;
+def : Pat<(v8f32 (vselect VK8WM:$mask,
+                          (extract_subvector (v16f32 VR512:$src), (iPTR 0)),
+                          (bitconvert (v8i32 immAllZerosV)))),
+          (v8f32 (VMOVAPSZ256rrkz VK8WM:$mask,
+                  (EXTRACT_SUBREG (v16f32 VR512:$src), sub_ymm)))>;
+}
 
 // Move Int Doubleword to Packed Double Int
 //
diff --git a/llvm/test/CodeGen/X86/vector-shuffle-masked.ll b/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
index abdc6992b092..cd543cd13f3f 100644
--- a/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
+++ b/llvm/test/CodeGen/X86/vector-shuffle-masked.ll
@@ -241,8 +241,7 @@ define <4 x i32> @mask_extract_v8i32_v4i32_0(<8 x i32> %a, <4 x i32> %passthru,
 ; CHECK-LABEL: mask_extract_v8i32_v4i32_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -256,7 +255,7 @@ define <4 x i32> @mask_extract_v8i32_v4i32_0_z(<8 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i32_v4i32_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x i32> %a, <8 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -299,8 +298,7 @@ define <4 x float> @mask_extract_v8f32_v4f32_0(<8 x float> %a, <4 x float> %pass
 ; CHECK-LABEL: mask_extract_v8f32_v4f32_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -314,7 +312,7 @@ define <4 x float> @mask_extract_v8f32_v4f32_0_z(<8 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f32_v4f32_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x float> %a, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -357,8 +355,7 @@ define <2 x i64> @mask_extract_v4i64_v2i64_0(<4 x i64> %a, <2 x i64> %passthru,
 ; CHECK-LABEL: mask_extract_v4i64_v2i64_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -372,7 +369,7 @@ define <2 x i64> @mask_extract_v4i64_v2i64_0_z(<4 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4i64_v2i64_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <4 x i64> %a, <4 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -415,8 +412,7 @@ define <2 x double> @mask_extract_v4f64_v2f64_0(<4 x double> %a, <2 x double> %p
 ; CHECK-LABEL: mask_extract_v4f64_v2f64_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %ymm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -430,7 +426,7 @@ define <2 x double> @mask_extract_v4f64_v2f64_0_z(<4 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v4f64_v2f64_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %ymm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <4 x double> %a, <4 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -473,8 +469,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_0(<16 x i32> %a, <4 x i32> %passthru
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -488,7 +483,7 @@ define <4 x i32> @mask_extract_v16i32_v4i32_0_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v4i32_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x4 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -561,8 +556,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_0(<16 x float> %a, <4 x float> %pa
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovaps %xmm1, %xmm0
+; CHECK-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -576,7 +570,7 @@ define <4 x float> @mask_extract_v16f32_v4f32_0_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v4f32_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x4 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovaps %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
@@ -649,8 +643,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_0(<16 x i32> %a, <8 x i32> %passthru
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -662,7 +655,7 @@ define <8 x i32> @mask_extract_v16i32_v8i32_0_z(<16 x i32> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16i32_v8i32_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti32x8 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa32 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x i32> %a, <16 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -699,8 +692,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_0(<16 x float> %a, <8 x float> %pa
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovaps %ymm1, %ymm0
+; CHECK-NEXT: vblendmps %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -712,7 +704,7 @@ define <8 x float> @mask_extract_v16f32_v8f32_0_z(<16 x float> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v16f32_v8f32_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf32x8 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovaps %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <16 x float> %a, <16 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -749,8 +741,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_0(<8 x i64> %a, <2 x i64> %passthru,
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovdqa %xmm1, %xmm0
+; CHECK-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -764,7 +755,7 @@ define <2 x i64> @mask_extract_v8i64_v2i64_0_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v2i64_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x2 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <2 x i32> <i32 0, i32 1>
@@ -837,8 +828,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_0(<8 x double> %a, <2 x double> %p
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm1 {%k1}
-; CHECK-NEXT: vmovapd %xmm1, %xmm0
+; CHECK-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -852,7 +842,7 @@ define <2 x double> @mask_extract_v8f64_v2f64_0_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v2f64_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x2 $0, %zmm0, %xmm0 {%k1} {z}
+; CHECK-NEXT: vmovapd %xmm0, %xmm0 {%k1} {z}
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <2 x i32> <i32 0, i32 1>
@@ -925,8 +915,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_0(<8 x i64> %a, <4 x i64> %passthru,
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovdqa %ymm1, %ymm0
+; CHECK-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -939,7 +928,7 @@ define <4 x i64> @mask_extract_v8i64_v4i64_0_z(<8 x i64> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8i64_v4i64_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextracti64x4 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovdqa64 %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x i64> %a, <8 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -979,8 +968,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_0(<8 x double> %a, <4 x double> %p
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_0:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm1 {%k1}
-; CHECK-NEXT: vmovapd %ymm1, %ymm0
+; CHECK-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %mask.cast = bitcast i8 %mask to <8 x i1>
@@ -993,7 +981,7 @@ define <4 x double> @mask_extract_v8f64_v4f64_0_z(<8 x double> %a, i8 %mask) {
 ; CHECK-LABEL: mask_extract_v8f64_v4f64_0_z:
 ; CHECK: # BB#0:
 ; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vextractf64x4 $0, %zmm0, %ymm0 {%k1} {z}
+; CHECK-NEXT: vmovapd %ymm0, %ymm0 {%k1} {z}
 ; CHECK-NEXT: retq
   %shuffle = shufflevector <8 x double> %a, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
   %mask.cast = bitcast i8 %mask to <8 x i1>
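Usage note (an illustrative sketch, not part of the commit): with AVX-512VL enabled, the masked lane-0 extract intrinsics from immintrin.h exercise exactly these patterns. The function names below are hypothetical, and the expected instruction sequences assume the patterns above are in effect.

    #include <immintrin.h>

    /* Hypothetical example; compile with -mavx512vl. A zero-masked extract of
       the lowest 128-bit lane should now lower to
       "vmovaps %xmm0, %xmm0 {%k1} {z}" instead of
       "vextractf32x4 $0, %ymm0, %xmm0 {%k1} {z}". */
    __m128 extract_low_zero(__m256 a, __mmask8 k) {
      return _mm256_maskz_extractf32x4_ps(k, a, 0);
    }

    /* The merge-masked form should become a single blend,
       "vblendmps %xmm0, %xmm1, %xmm0 {%k1}", rather than a masked
       vextractf32x4 followed by a vmovaps. */
    __m128 extract_low_merge(__m128 src, __mmask8 k, __m256 a) {
      return _mm256_mask_extractf32x4_ps(src, k, a, 0);
    }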