[DAGCombiner] allow hoisting vector bitwise logic ahead of truncates
The transform performs a bitwise logic op in a wider type followed by truncate
when both inputs are truncated from the same source type:

    logic_op (truncate x), (truncate y) --> truncate (logic_op x, y)

There are a bunch of other checks that should prevent doing this when it might
be harmful. We already do this transform for scalars in this spot. The vector
limitation was shared with a check for the case when the operands are extended.
I'm not sure if that limit is needed either, but that would be a separate patch.

Differential Revision: https://reviews.llvm.org/D55448

llvm-svn: 349303
parent aad3645fe1
commit f24900b934
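
In IR terms, the rewrite has the following shape (a minimal sketch with illustrative types and value names, not code taken from the patch):

; Before: each input is truncated, then the logic op runs in the narrow type.
define <4 x i16> @before(<4 x i32> %x, <4 x i32> %y) {
  %tx = trunc <4 x i32> %x to <4 x i16>
  %ty = trunc <4 x i32> %y to <4 x i16>
  %r = and <4 x i16> %tx, %ty
  ret <4 x i16> %r
}

; After: the logic op runs once in the wide source type; a single truncate remains.
define <4 x i16> @after(<4 x i32> %x, <4 x i32> %y) {
  %wide = and <4 x i32> %x, %y
  %r = trunc <4 x i32> %wide to <4 x i16>
  ret <4 x i16> %r
}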

@@ -3755,11 +3755,8 @@ SDValue DAGCombiner::hoistLogicOpWithSameOpcodeHands(SDNode *N) {
   // instructions without eliminating anything.
   if (!N0.hasOneUse() && !N1.hasOneUse())
     return SDValue();
-  // We need matching integer source types.
-  // Do not hoist logic op inside of a vector extend, since it may combine
-  // into a vsetcc.
-  // TODO: Should the vector check apply to truncate though?
-  if (VT.isVector() || XVT != Y.getValueType())
+  // We need matching source types.
+  if (XVT != Y.getValueType())
     return SDValue();
   // Don't create an illegal op during or after legalization.
   if (LegalOperations && !TLI.isOperationLegal(LogicOpcode, XVT))
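
The one-use check kept above is what prevents this combine from pessimizing code: the hoist only pays off if at least one of the truncates becomes dead. A hypothetical case where the combine should not fire (illustrative IR, not a test from this patch):

; Both truncates have another user, so neither can be eliminated;
; hoisting the 'and' would add a wide op without removing anything.
define <4 x i16> @no_hoist(<4 x i32> %x, <4 x i32> %y, <4 x i16>* %p, <4 x i16>* %q) {
  %tx = trunc <4 x i32> %x to <4 x i16>
  %ty = trunc <4 x i32> %y to <4 x i16>
  store <4 x i16> %tx, <4 x i16>* %p
  store <4 x i16> %ty, <4 x i16>* %q
  %r = and <4 x i16> %tx, %ty
  ret <4 x i16> %r
}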

@@ -61,9 +61,8 @@ define <4 x i1> @and_eq_vec(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c, <4 x i32>
 ; CHECK-NEXT: vceq.i32 q8, q9, q8
 ; CHECK-NEXT: vld1.64 {d22, d23}, [r0]
 ; CHECK-NEXT: vceq.i32 q9, q11, q10
+; CHECK-NEXT: vand q8, q8, q9
 ; CHECK-NEXT: vmovn.i32 d16, q8
-; CHECK-NEXT: vmovn.i32 d17, q9
-; CHECK-NEXT: vand d16, d16, d17
 ; CHECK-NEXT: vmov r0, r1, d16
 ; CHECK-NEXT: pop {r11, pc}
 %cmp1 = icmp eq <4 x i32> %a, %b

@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s

 ; CHECK-LABEL: t00
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <128 x i8> @t00(<128 x i8> %a0, <128 x i8> %a1) #0 {
 %q0 = trunc <128 x i8> %a0 to <128 x i1>
 %q1 = trunc <128 x i8> %a1 to <128 x i1>

@@ -13,7 +13,7 @@ define <128 x i8> @t00(<128 x i8> %a0, <128 x i8> %a1) #0 {
 declare <1024 x i1> @llvm.hexagon.vandvrt.128B(<128 x i8>, i32)

 ; CHECK-LABEL: t01
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <128 x i8> @t01(<128 x i8> %a0, <128 x i8> %a1) #0 {
 %q0 = trunc <128 x i8> %a0 to <128 x i1>
 %q1 = trunc <128 x i8> %a1 to <128 x i1>

@@ -23,7 +23,7 @@ define <128 x i8> @t01(<128 x i8> %a0, <128 x i8> %a1) #0 {
 }

 ; CHECK-LABEL: t02
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <128 x i8> @t02(<128 x i8> %a0, <128 x i8> %a1) #0 {
 %q0 = trunc <128 x i8> %a0 to <128 x i1>
 %q1 = trunc <128 x i8> %a1 to <128 x i1>

@@ -33,7 +33,7 @@ define <128 x i8> @t02(<128 x i8> %a0, <128 x i8> %a1) #0 {
 }

 ; CHECK-LABEL: t10
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i16> @t10(<64 x i16> %a0, <64 x i16> %a1) #0 {
 %q0 = trunc <64 x i16> %a0 to <64 x i1>
 %q1 = trunc <64 x i16> %a1 to <64 x i1>

@@ -43,7 +43,7 @@ define <64 x i16> @t10(<64 x i16> %a0, <64 x i16> %a1) #0 {
 }

 ; CHECK-LABEL: t11
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i16> @t11(<64 x i16> %a0, <64 x i16> %a1) #0 {
 %q0 = trunc <64 x i16> %a0 to <64 x i1>
 %q1 = trunc <64 x i16> %a1 to <64 x i1>

@@ -53,7 +53,7 @@ define <64 x i16> @t11(<64 x i16> %a0, <64 x i16> %a1) #0 {
 }

 ; CHECK-LABEL: t12
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i16> @t12(<64 x i16> %a0, <64 x i16> %a1) #0 {
 %q0 = trunc <64 x i16> %a0 to <64 x i1>
 %q1 = trunc <64 x i16> %a1 to <64 x i1>

@@ -63,7 +63,7 @@ define <64 x i16> @t12(<64 x i16> %a0, <64 x i16> %a1) #0 {
 }

 ; CHECK-LABEL: t20
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i32> @t20(<32 x i32> %a0, <32 x i32> %a1) #0 {
 %q0 = trunc <32 x i32> %a0 to <32 x i1>
 %q1 = trunc <32 x i32> %a1 to <32 x i1>

@@ -73,7 +73,7 @@ define <32 x i32> @t20(<32 x i32> %a0, <32 x i32> %a1) #0 {
 }

 ; CHECK-LABEL: t21
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i32> @t21(<32 x i32> %a0, <32 x i32> %a1) #0 {
 %q0 = trunc <32 x i32> %a0 to <32 x i1>
 %q1 = trunc <32 x i32> %a1 to <32 x i1>

@@ -83,7 +83,7 @@ define <32 x i32> @t21(<32 x i32> %a0, <32 x i32> %a1) #0 {
 }

 ; CHECK-LABEL: t22
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i32> @t22(<32 x i32> %a0, <32 x i32> %a1) #0 {
 %q0 = trunc <32 x i32> %a0 to <32 x i1>
 %q1 = trunc <32 x i32> %a1 to <32 x i1>

@@ -1,7 +1,7 @@
 ; RUN: llc -march=hexagon < %s | FileCheck %s

 ; CHECK-LABEL: t00
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i8> @t00(<64 x i8> %a0, <64 x i8> %a1) #0 {
 %q0 = trunc <64 x i8> %a0 to <64 x i1>
 %q1 = trunc <64 x i8> %a1 to <64 x i1>

@@ -11,7 +11,7 @@ define <64 x i8> @t00(<64 x i8> %a0, <64 x i8> %a1) #0 {
 }

 ; CHECK-LABEL: t01
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i8> @t01(<64 x i8> %a0, <64 x i8> %a1) #0 {
 %q0 = trunc <64 x i8> %a0 to <64 x i1>
 %q1 = trunc <64 x i8> %a1 to <64 x i1>

@@ -21,7 +21,7 @@ define <64 x i8> @t01(<64 x i8> %a0, <64 x i8> %a1) #0 {
 }

 ; CHECK-LABEL: t02
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <64 x i8> @t02(<64 x i8> %a0, <64 x i8> %a1) #0 {
 %q0 = trunc <64 x i8> %a0 to <64 x i1>
 %q1 = trunc <64 x i8> %a1 to <64 x i1>

@@ -31,7 +31,7 @@ define <64 x i8> @t02(<64 x i8> %a0, <64 x i8> %a1) #0 {
 }

 ; CHECK-LABEL: t10
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i16> @t10(<32 x i16> %a0, <32 x i16> %a1) #0 {
 %q0 = trunc <32 x i16> %a0 to <32 x i1>
 %q1 = trunc <32 x i16> %a1 to <32 x i1>

@@ -41,7 +41,7 @@ define <32 x i16> @t10(<32 x i16> %a0, <32 x i16> %a1) #0 {
 }

 ; CHECK-LABEL: t11
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i16> @t11(<32 x i16> %a0, <32 x i16> %a1) #0 {
 %q0 = trunc <32 x i16> %a0 to <32 x i1>
 %q1 = trunc <32 x i16> %a1 to <32 x i1>

@@ -51,7 +51,7 @@ define <32 x i16> @t11(<32 x i16> %a0, <32 x i16> %a1) #0 {
 }

 ; CHECK-LABEL: t12
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <32 x i16> @t12(<32 x i16> %a0, <32 x i16> %a1) #0 {
 %q0 = trunc <32 x i16> %a0 to <32 x i1>
 %q1 = trunc <32 x i16> %a1 to <32 x i1>

@@ -61,7 +61,7 @@ define <32 x i16> @t12(<32 x i16> %a0, <32 x i16> %a1) #0 {
 }

 ; CHECK-LABEL: t20
-; CHECK: and(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vand(v{{[0-9:]+}},v{{[0-9:]+}})
 define <16 x i32> @t20(<16 x i32> %a0, <16 x i32> %a1) #0 {
 %q0 = trunc <16 x i32> %a0 to <16 x i1>
 %q1 = trunc <16 x i32> %a1 to <16 x i1>

@@ -71,7 +71,7 @@ define <16 x i32> @t20(<16 x i32> %a0, <16 x i32> %a1) #0 {
 }

 ; CHECK-LABEL: t21
-; CHECK: or(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <16 x i32> @t21(<16 x i32> %a0, <16 x i32> %a1) #0 {
 %q0 = trunc <16 x i32> %a0 to <16 x i1>
 %q1 = trunc <16 x i32> %a1 to <16 x i1>

@@ -81,7 +81,7 @@ define <16 x i32> @t21(<16 x i32> %a0, <16 x i32> %a1) #0 {
 }

 ; CHECK-LABEL: t22
-; CHECK: xor(q{{[0-3]}},q{{[0-3]}})
+; CHECK: vxor(v{{[0-9:]+}},v{{[0-9:]+}})
 define <16 x i32> @t22(<16 x i32> %a0, <16 x i32> %a1) #0 {
 %q0 = trunc <16 x i32> %a0 to <16 x i1>
 %q1 = trunc <16 x i32> %a1 to <16 x i1>

@@ -318,27 +318,19 @@ define float @pr30561_f32(float %b, float %a, i1 %c) {
define <16 x i16> @pr31515(<16 x i1> %a, <16 x i1> %b, <16 x i16> %c) nounwind {
; X86-LABEL: pr31515:
; X86: # %bb.0:
; X86-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; X86-NEXT: vpslld $31, %zmm1, %zmm1
; X86-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; X86-NEXT: vpslld $31, %zmm0, %zmm0
; X86-NEXT: vptestmd %zmm0, %zmm0, %k1
; X86-NEXT: vptestmd %zmm1, %zmm1, %k1 {%k1}
; X86-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; X86-NEXT: vpmovdw %zmm0, %ymm0
; X86-NEXT: vpand %xmm1, %xmm0, %xmm0
; X86-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X86-NEXT: vpsllw $15, %ymm0, %ymm0
; X86-NEXT: vpsraw $15, %ymm0, %ymm0
; X86-NEXT: vpandn %ymm2, %ymm0, %ymm0
; X86-NEXT: retl
;
; X64-LABEL: pr31515:
; X64: # %bb.0:
; X64-NEXT: vpmovzxbd {{.*#+}} zmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero,xmm1[8],zero,zero,zero,xmm1[9],zero,zero,zero,xmm1[10],zero,zero,zero,xmm1[11],zero,zero,zero,xmm1[12],zero,zero,zero,xmm1[13],zero,zero,zero,xmm1[14],zero,zero,zero,xmm1[15],zero,zero,zero
; X64-NEXT: vpslld $31, %zmm1, %zmm1
; X64-NEXT: vpmovzxbd {{.*#+}} zmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero,xmm0[8],zero,zero,zero,xmm0[9],zero,zero,zero,xmm0[10],zero,zero,zero,xmm0[11],zero,zero,zero,xmm0[12],zero,zero,zero,xmm0[13],zero,zero,zero,xmm0[14],zero,zero,zero,xmm0[15],zero,zero,zero
; X64-NEXT: vpslld $31, %zmm0, %zmm0
; X64-NEXT: vptestmd %zmm0, %zmm0, %k1
; X64-NEXT: vptestmd %zmm1, %zmm1, %k1 {%k1}
; X64-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; X64-NEXT: vpmovdw %zmm0, %ymm0
; X64-NEXT: vpand %xmm1, %xmm0, %xmm0
; X64-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; X64-NEXT: vpsllw $15, %ymm0, %ymm0
; X64-NEXT: vpsraw $15, %ymm0, %ymm0
; X64-NEXT: vpandn %ymm2, %ymm0, %ymm0
; X64-NEXT: retq
%mask = and <16 x i1> %a, %b

@@ -30,11 +30,10 @@ define i8 @v8i16(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c, <8 x i16> %d) {
; AVX512F-LABEL: v8i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm1
; AVX512F-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k1
; AVX512F-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm0
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper

@@ -158,11 +157,10 @@ define i16 @v16i8(<16 x i8> %a, <16 x i8> %b, <16 x i8> %c, <16 x i8> %d) {
; AVX512F-LABEL: v16i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm1
; AVX512F-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtb %xmm3, %xmm2, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper

@@ -897,10 +895,9 @@ define i8 @v8i8(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
; AVX512F-NEXT: vpsllw $8, %xmm0, %xmm0
; AVX512F-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX512F-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX512F-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k1
; AVX512F-NEXT: vpmovsxwd %xmm2, %ymm0
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0 {%k1}
; AVX512F-NEXT: vptestmd %ymm0, %ymm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: def $al killed $al killed $eax
; AVX512F-NEXT: vzeroupper

@@ -55,17 +55,17 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
;
; AVX1-LABEL: v4i64:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vmovmskps %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax
; AVX1-NEXT: vzeroupper

@@ -74,12 +74,10 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; AVX2-LABEL: v4i64:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovmskps %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax
; AVX2-NEXT: vzeroupper

@@ -126,12 +124,10 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; AVX12-LABEL: v4f64:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltpd %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vcmpltpd %ymm2, %ymm3, %ymm1
; AVX12-NEXT: vandpd %ymm1, %ymm0, %ymm0
; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX12-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vcmpltpd %ymm2, %ymm3, %ymm1
; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX12-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vmovmskps %xmm0, %eax
; AVX12-NEXT: # kill: def $al killed $al killed $eax
; AVX12-NEXT: vzeroupper

@@ -177,17 +173,17 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
;
; AVX1-LABEL: v16i16:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpcmpgtw %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vpcmpgtw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: # kill: def $ax killed $ax killed $eax
; AVX1-NEXT: vzeroupper

@@ -196,12 +192,10 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX2-LABEL: v16i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: # kill: def $ax killed $ax killed $eax
; AVX2-NEXT: vzeroupper

@@ -210,11 +204,10 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; AVX512F-LABEL: v16i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm1
; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k1
; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm2, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: # kill: def $ax killed $ax killed $eax
; AVX512F-NEXT: vzeroupper

@@ -252,17 +245,17 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
;
; AVX1-LABEL: v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm4
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpackssdw %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm0, %xmm1, %xmm0
; AVX1-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: # kill: def $al killed $al killed $eax

@@ -272,12 +265,10 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; AVX2-LABEL: v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtd %ymm3, %ymm2, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: # kill: def $al killed $al killed $eax

@@ -326,12 +317,10 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; AVX12-LABEL: v8f32:
; AVX12: # %bb.0:
; AVX12-NEXT: vcmpltps %ymm0, %ymm1, %ymm0
; AVX12-NEXT: vcmpltps %ymm2, %ymm3, %ymm1
; AVX12-NEXT: vandps %ymm1, %ymm0, %ymm0
; AVX12-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX12-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vcmpltps %ymm2, %ymm3, %ymm1
; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX12-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpacksswb %xmm0, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
; AVX12-NEXT: # kill: def $al killed $al killed $eax

@@ -408,18 +397,14 @@ define i32 @v32i8(<32 x i8> %a, <32 x i8> %b, <32 x i8> %c, <32 x i8> %d) {
; AVX512F-LABEL: v32i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k2
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2}
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm2, %ymm1
; AVX512F-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0
; AVX512F-NEXT: kmovw %k0, %ecx
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1}
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: shll $16, %eax
; AVX512F-NEXT: orl %ecx, %eax

@@ -304,18 +304,16 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; AVX512F-LABEL: v32i16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k1
; AVX512F-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm2
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k2
; AVX512F-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm0
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm1
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k2}
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %ecx
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k1}
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: shll $16, %eax
; AVX512F-NEXT: orl %ecx, %eax

@@ -615,33 +613,29 @@ define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpcmpgtb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k1
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k2
; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k3
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm5
; AVX512F-NEXT: vextracti128 $1, %ymm5, %xmm7
; AVX512F-NEXT: vpand %xmm7, %xmm3, %xmm3
; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX512F-NEXT: vpand %xmm6, %xmm2, %xmm2
; AVX512F-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k4
; AVX512F-NEXT: vpcmpgtb %ymm7, %ymm5, %ymm0
; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX512F-NEXT: vpmovsxbd %xmm1, %zmm1
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vpcmpgtb %ymm6, %ymm4, %ymm2
; AVX512F-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm3
; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm2
; AVX512F-NEXT: vptestmd %zmm2, %zmm2, %k0 {%k4}
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: vptestmd %zmm3, %zmm3, %k0 {%k3}
; AVX512F-NEXT: vpmovsxbd %xmm2, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %ecx
; AVX512F-NEXT: shll $16, %ecx
; AVX512F-NEXT: orl %eax, %ecx
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0 {%k2}
; AVX512F-NEXT: vpand %xmm5, %xmm1, %xmm0
; AVX512F-NEXT: vpmovsxbd %xmm0, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %edx
; AVX512F-NEXT: vptestmd %zmm1, %zmm1, %k0 {%k1}
; AVX512F-NEXT: vpmovsxbd %xmm3, %zmm0
; AVX512F-NEXT: vptestmd %zmm0, %zmm0, %k0
; AVX512F-NEXT: kmovw %k0, %eax
; AVX512F-NEXT: shll $16, %eax
; AVX512F-NEXT: orl %edx, %eax

@@ -936,19 +936,16 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm5
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm0, %xmm5
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpandn %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpandn %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;

@@ -957,14 +954,11 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpminud %ymm1, %ymm0, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm0, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;

@@ -1067,22 +1061,19 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm3, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpmaxud %xmm0, %xmm1, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm1, %xmm6
; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpmaxud %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpmaxud %xmm2, %xmm4, %xmm5
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm4, %xmm5
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vpandn %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpsubd %xmm4, %xmm2, %xmm1
; AVX1-NEXT: vpandn %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;

@@ -1091,14 +1082,11 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT: vpmaxud %ymm0, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm2, %ymm1, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpackssdw %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpsubd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpandn %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm0, %xmm2, %xmm0
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;

@@ -348,13 +348,13 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512F-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512F-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512F-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;

@@ -364,13 +364,12 @@ define <8 x i16> @var_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; AVX512VL-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512VL-NEXT: vpsllvd %ymm2, %ymm0, %ymm2
; AVX512VL-NEXT: vpmovdw %ymm2, %xmm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm3 = [16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %xmm1, %xmm3, %xmm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX512VL-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vpor %xmm0, %xmm2, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;

@@ -592,11 +591,11 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;

@@ -607,11 +606,10 @@ define <16 x i8> @var_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VLBW-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VLBW-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VLBW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;

@@ -1184,11 +1182,11 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;

@@ -1200,11 +1198,10 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero,xmm1[8],zero,xmm1[9],zero,xmm1[10],zero,xmm1[11],zero,xmm1[12],zero,xmm1[13],zero,xmm1[14],zero,xmm1[15],zero
; AVX512VLBW-NEXT: vpsllvw %ymm1, %ymm0, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero,xmm2[8],zero,xmm2[9],zero,xmm2[10],zero,xmm2[11],zero,xmm2[12],zero,xmm2[13],zero,xmm2[14],zero,xmm2[15],zero
; AVX512VLBW-NEXT: vpsrlvw %ymm2, %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VLBW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;

@@ -1706,11 +1703,11 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,2,3,4,5,6,7,8,7,6,5,4,3,2,1]
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm2 = [8,7,6,5,4,3,2,1,0,1,2,3,4,5,6,7]
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;

@@ -1718,10 +1715,9 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %ymm0, %ymm1
; AVX512VLBW-NEXT: vpmovwb %ymm1, %xmm1
; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %ymm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: vpmovwb %ymm0, %xmm0
; AVX512VLBW-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: vzeroupper
; AVX512VLBW-NEXT: retq
;

@@ -265,13 +265,12 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512F-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpsllvd %zmm2, %zmm0, %zmm2
; AVX512F-NEXT: vpmovdw %zmm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm0, %zmm2, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v16i16:

@@ -280,13 +279,12 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
; AVX512VL-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpsllvd %zmm2, %zmm0, %zmm2
; AVX512VL-NEXT: vpmovdw %zmm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm0, %zmm2, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm2, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v16i16:

@@ -466,11 +464,10 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: var_rotate_v32i8:

@@ -480,11 +477,10 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: var_rotate_v32i8:

@@ -888,11 +884,10 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512BW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
; AVX512BW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: splatvar_rotate_v32i8:

@@ -903,11 +898,10 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero,ymm1[16],zero,ymm1[17],zero,ymm1[18],zero,ymm1[19],zero,ymm1[20],zero,ymm1[21],zero,ymm1[22],zero,ymm1[23],zero,ymm1[24],zero,ymm1[25],zero,ymm1[26],zero,ymm1[27],zero,ymm1[28],zero,ymm1[29],zero,ymm1[30],zero,ymm1[31],zero
; AVX512VLBW-NEXT: vpsllvw %zmm1, %zmm0, %zmm1
; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero,ymm2[16],zero,ymm2[17],zero,ymm2[18],zero,ymm2[19],zero,ymm2[20],zero,ymm2[21],zero,ymm2[22],zero,ymm2[23],zero,ymm2[24],zero,ymm2[25],zero,ymm2[26],zero,ymm2[27],zero,ymm2[28],zero,ymm2[29],zero,ymm2[30],zero,ymm2[31],zero
; AVX512VLBW-NEXT: vpsrlvw %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: splatvar_rotate_v32i8:

@@ -1299,20 +1293,18 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512BW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v32i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpmovzxbw {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero,ymm0[16],zero,ymm0[17],zero,ymm0[18],zero,ymm0[19],zero,ymm0[20],zero,ymm0[21],zero,ymm0[22],zero,ymm0[23],zero,ymm0[24],zero,ymm0[25],zero,ymm0[26],zero,ymm0[27],zero,ymm0[28],zero,ymm0[29],zero,ymm0[30],zero,ymm0[31],zero
; AVX512VLBW-NEXT: vpsllvw {{.*}}(%rip), %zmm0, %zmm1
; AVX512VLBW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512VLBW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm0, %zmm1, %zmm0
; AVX512VLBW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512VLBW-NEXT: vpor %ymm0, %ymm1, %ymm0
; AVX512VLBW-NEXT: retq
;
; XOPAVX1-LABEL: constant_rotate_v32i8:

@@ -40,23 +40,21 @@ define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm5 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpsllvd %zmm5, %zmm0, %zmm5
; AVX512F-NEXT: vpmovdw %zmm5, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512F-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm0, %zmm5, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpsllvd %zmm3, %zmm1, %zmm3
; AVX512F-NEXT: vpmovdw %zmm3, %ymm3
; AVX512F-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpsrlvd %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vpord %zmm1, %zmm3, %zmm1
; AVX512F-NEXT: vpmovdw %zmm1, %ymm1
; AVX512F-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_rotate_v32i16:

@@ -66,23 +64,21 @@ define <32 x i16> @var_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm5 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpsllvd %zmm5, %zmm0, %zmm5
; AVX512VL-NEXT: vpmovdw %zmm5, %ymm5
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16]
; AVX512VL-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm0, %zmm5, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: vpor %ymm0, %ymm5, %ymm0
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm2
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm3 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpsllvd %zmm3, %zmm1, %zmm3
; AVX512VL-NEXT: vpmovdw %zmm3, %ymm3
; AVX512VL-NEXT: vpsubw %ymm2, %ymm6, %ymm2
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpsrlvd %zmm2, %zmm1, %zmm1
; AVX512VL-NEXT: vpord %zmm1, %zmm3, %zmm1
; AVX512VL-NEXT: vpmovdw %zmm1, %ymm1
; AVX512VL-NEXT: vpor %ymm1, %ymm3, %ymm1
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: var_rotate_v32i16: