[X86] Combine concat(shufps,shufps) -> shufps(concat,concat)

Now that rG18c19441d105 has improved VPERM2X128 handling, we can perform this combine to improve 64-bit -> 32-bit element truncation (e.g. v8i64 -> v8i32) without generating poor cross-lane shuffles.

Someday combineX86ShufflesRecursively will handle this, but we're still really bad at dealing with different vector widths.
Simon Pilgrim 2020-03-21 12:39:29 +00:00
parent 7a62ea3889
commit 4ceade0428
13 changed files with 217 additions and 307 deletions


@@ -46402,6 +46402,24 @@ static SDValue combineConcatVectorOps(const SDLoc &DL, MVT VT,
})) {
unsigned NumOps = Ops.size();
switch (Op0.getOpcode()) {
case X86ISD::SHUFP: {
// Add SHUFPD support if/when necessary.
if (!IsSplat && VT.getScalarType() == MVT::f32 &&
llvm::all_of(Ops, [Op0](SDValue Op) {
return Op.getOperand(2) == Op0.getOperand(2);
})) {
SmallVector<SDValue, 2> LHS, RHS;
for (unsigned i = 0; i != NumOps; ++i) {
LHS.push_back(Ops[i].getOperand(0));
RHS.push_back(Ops[i].getOperand(1));
}
return DAG.getNode(Op0.getOpcode(), DL, VT,
DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, LHS),
DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, RHS),
Op0.getOperand(2));
}
break;
}
case X86ISD::PSHUFHW:
case X86ISD::PSHUFLW:
case X86ISD::PSHUFD:
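
The new X86ISD::SHUFP case relies on SHUFPS applying its 8-bit immediate independently to each 128-bit lane, so concatenating two 128-bit SHUFPS results that share an immediate is equivalent to one 256-bit SHUFPS of the concatenated operands. The stand-alone C++ sketch below (not part of the patch; the helper names, file name, and the 0x88 immediate are illustrative assumptions) models those semantics to show why the fold is sound; 0x88 is the [0,2],[0,2] selection used by the i64 -> i32 truncation tests that follow.

// Host-side model of the fold, assuming standard SHUFPS semantics.
#include <array>
#include <cassert>
#include <cstdint>

using V4 = std::array<float, 4>;
using V8 = std::array<float, 8>;

// 128-bit SHUFPS: the low two result elements come from the first operand,
// the high two from the second, each picked by a 2-bit field of the immediate.
static V4 shufps128(const V4 &L, const V4 &R, uint8_t Imm) {
  return {L[Imm & 3], L[(Imm >> 2) & 3], R[(Imm >> 4) & 3], R[(Imm >> 6) & 3]};
}

// 256-bit SHUFPS applies the same immediate to each 128-bit lane independently.
static V8 shufps256(const V8 &L, const V8 &R, uint8_t Imm) {
  V8 Res;
  for (int Lane = 0; Lane != 2; ++Lane)
    for (int I = 0; I != 4; ++I) {
      unsigned Sel = (Imm >> (2 * I)) & 3;
      Res[4 * Lane + I] = (I < 2 ? L : R)[4 * Lane + Sel];
    }
  return Res;
}

static V8 concat(const V4 &Lo, const V4 &Hi) {
  return {Lo[0], Lo[1], Lo[2], Lo[3], Hi[0], Hi[1], Hi[2], Hi[3]};
}

int main() {
  V4 A = {0, 1, 2, 3}, B = {4, 5, 6, 7};
  V4 C = {8, 9, 10, 11}, D = {12, 13, 14, 15};
  const uint8_t Imm = 0x88; // xmm[0,2],xmm[0,2], as in the truncation tests.
  // concat(shufps(A,B,Imm), shufps(C,D,Imm)) ...
  V8 Narrow = concat(shufps128(A, B, Imm), shufps128(C, D, Imm));
  // ... matches shufps(concat(A,C), concat(B,D), Imm) lane for lane.
  V8 Wide = shufps256(concat(A, C), concat(B, D), Imm);
  assert(Narrow == Wide);
  return 0;
}

Building and running the sketch (e.g. g++ -std=c++17 shufps_model.cpp && ./a.out) should finish without tripping the assert; the same per-lane argument is what lets the test diffs below replace two xmm vshufps plus an insert with a single ymm vshufps.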


@@ -163,11 +163,9 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpxor %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX1-NEXT: vmaskmovps %ymm0, %ymm2, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -178,11 +176,9 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -457,11 +453,9 @@ define void @truncstore_v8i64_v8i16(<8 x i64> %x, <8 x i16>* %p, <8 x i32> %mask
; AVX2-LABEL: truncstore_v8i64_v8i16:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm4[0,2],ymm0[4,6],ymm4[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm1


@@ -300,26 +300,26 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm9
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm4, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm5
; AVX1-NEXT: vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [18446744071562067968,18446744071562067968]
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm0, %xmm10
; AVX1-NEXT: vblendvpd %xmm2, %xmm7, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm7
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm6
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm2
; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm4, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm7
; AVX1-NEXT: vblendvpd %xmm9, %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm4
; AVX1-NEXT: vblendvpd %xmm4, %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm5, %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vblendvpd %xmm10, %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX1-NEXT: vmaskmovps %ymm0, %ymm8, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -331,20 +331,18 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX2-NEXT: vpcmpeqd %ymm3, %ymm3, %ymm3
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpcmpgtq %ymm0, %ymm3, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpcmpgtq %ymm1, %ymm3, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm3 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm1, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX2-NEXT: vpcmpgtq %ymm3, %ymm0, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq


@@ -240,22 +240,22 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX1-NEXT: vpxor %xmm3, %xmm0, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [9223372041149743103,9223372041149743103]
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm5, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
; AVX1-NEXT: vpxor %xmm3, %xmm6, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm5, %xmm7
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm2
; AVX1-NEXT: vpxor %xmm3, %xmm1, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm6, %xmm5, %xmm6
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpxor %xmm3, %xmm7, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX1-NEXT: vpxor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vmovapd {{.*#+}} xmm5 = [4294967295,4294967295]
; AVX1-NEXT: vblendvpd %xmm3, %xmm4, %xmm5, %xmm3
; AVX1-NEXT: vblendvpd %xmm2, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX1-NEXT: vblendvpd %xmm7, %xmm6, %xmm5, %xmm2
; AVX1-NEXT: vblendvpd %xmm2, %xmm7, %xmm5, %xmm2
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vblendvpd %xmm9, %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX1-NEXT: vmaskmovps %ymm0, %ymm8, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
@@ -268,18 +268,16 @@ define void @truncstore_v8i64_v8i32(<8 x i64> %x, <8 x i32>* %p, <8 x i32> %mask
; AVX2-NEXT: vpxor %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm3 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm4 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm5
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm5
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm6 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
; AVX2-NEXT: vpcmpgtq %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vblendvpd %ymm5, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vpxor %ymm4, %ymm1, %ymm4
; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vpxor %ymm4, %ymm0, %ymm4
; AVX2-NEXT: vpcmpgtq %ymm4, %ymm6, %ymm4
; AVX2-NEXT: vblendvpd %ymm4, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; AVX2-NEXT: vblendvpd %ymm4, %ymm0, %ymm3, %ymm0
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm3[0,2],ymm0[4,6],ymm3[4,6]
; AVX2-NEXT: vpmaskmovd %ymm0, %ymm2, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq


@@ -8,11 +8,9 @@ define <8 x i32> @foo(<8 x i64> %x, <4 x i64> %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vandps %ymm2, %ymm0, %ymm0
; CHECK-NEXT: vandps {{\.LCPI.*}}, %ymm1, %ymm1
; CHECK-NEXT: vextractf128 $1, %ymm0, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; CHECK-NEXT: vextractf128 $1, %ymm1, %xmm2
; CHECK-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; CHECK-NEXT: retl
%a = shufflevector <4 x i64> %y, <4 x i64> <i64 12345, i64 67890, i64 13579, i64 24680>, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%b = and <8 x i64> %x, %a


@@ -566,11 +566,9 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) {
;
; AVX2-LABEL: trunc_v8i64_v8i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0


@@ -560,11 +560,9 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) {
;
; AVX2-LABEL: trunc_v8i64_v8i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0


@@ -611,11 +611,9 @@ define i1 @trunc_v8i64_v8i1(<8 x i64>) {
;
; AVX2-LABEL: trunc_v8i64_v8i1:
; AVX2: # %bb.0:
; AVX2-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0


@@ -101,13 +101,11 @@ define <8 x i16> @trunc_add_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
;
; AVX2-SLOW-LABEL: trunc_add_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpaddq %ymm3, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpaddq %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -253,25 +251,21 @@ define <16 x i8> @trunc_add_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
;
; AVX2-SLOW-LABEL: trunc_add_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpaddq %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpaddq %ymm5, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpaddq %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpaddq %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpaddq %ymm7, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpaddq %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -574,11 +568,9 @@ define <8 x i16> @trunc_add_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_add_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpaddw {{.*}}(%rip), %xmm0, %xmm0
@@ -696,21 +688,17 @@ define <16 x i8> @trunc_add_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_add_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -959,13 +947,11 @@ define <8 x i16> @trunc_sub_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
;
; AVX2-SLOW-LABEL: trunc_sub_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubq %ymm3, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubq %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -1111,25 +1097,21 @@ define <16 x i8> @trunc_sub_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
;
; AVX2-SLOW-LABEL: trunc_sub_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpsubq %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubq %ymm5, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vpsubq %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpsubq %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsubq %ymm7, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpsubq %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -1400,11 +1382,9 @@ define <8 x i16> @trunc_sub_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_sub_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpsubw {{.*}}(%rip), %xmm0, %xmm0
@@ -1522,21 +1502,17 @@ define <16 x i8> @trunc_sub_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_sub_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -1852,19 +1828,15 @@ define <8 x i16> @trunc_mul_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
;
; AVX2-SLOW-LABEL: trunc_mul_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm4[0,2],ymm0[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpmullw %xmm2, %xmm0, %xmm0
@@ -2426,11 +2398,9 @@ define <8 x i16> @trunc_mul_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_mul_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpmullw {{.*}}(%rip), %xmm0, %xmm0
@@ -2869,13 +2839,11 @@ define <8 x i16> @trunc_and_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
;
; AVX2-SLOW-LABEL: trunc_and_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vandps %ymm3, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -3007,25 +2975,21 @@ define <16 x i8> @trunc_and_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
;
; AVX2-SLOW-LABEL: trunc_and_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vandps %ymm5, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vandps %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vandps %ymm7, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vandps %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -3269,11 +3233,9 @@ define <8 x i16> @trunc_and_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_and_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
@@ -3391,21 +3353,17 @@ define <16 x i8> @trunc_and_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_and_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -3646,13 +3604,11 @@ define <8 x i16> @trunc_xor_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
;
; AVX2-SLOW-LABEL: trunc_xor_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vxorps %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vxorps %ymm3, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vxorps %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -3784,25 +3740,21 @@ define <16 x i8> @trunc_xor_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwin
;
; AVX2-SLOW-LABEL: trunc_xor_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vxorps %ymm5, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vxorps %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vxorps %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vxorps %ymm7, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vxorps %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -4046,11 +3998,9 @@ define <8 x i16> @trunc_xor_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_xor_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpxor {{.*}}(%rip), %xmm0, %xmm0
@@ -4168,21 +4118,17 @@ define <16 x i8> @trunc_xor_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_xor_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -4423,13 +4369,11 @@ define <8 x i16> @trunc_or_v8i64_v8i16(<8 x i64> %a0, <8 x i64> %a1) nounwind {
;
; AVX2-SLOW-LABEL: trunc_or_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vorps %ymm3, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vorps %ymm2, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -4561,25 +4505,21 @@ define <16 x i8> @trunc_or_v16i64_v16i8(<16 x i64> %a0, <16 x i64> %a1) nounwind
;
; AVX2-SLOW-LABEL: trunc_or_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vorps %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vorps %ymm5, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vorps %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vorps %ymm4, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vorps %ymm7, %ymm3, %ymm3
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vorps %ymm6, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0
@@ -4823,11 +4763,9 @@ define <8 x i16> @trunc_or_const_v8i64_v8i16(<8 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_or_const_v8i64_v8i16:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpor {{.*}}(%rip), %xmm0, %xmm0
@@ -4945,21 +4883,17 @@ define <16 x i8> @trunc_or_const_v16i64_v16i8(<16 x i64> %a0) nounwind {
;
; AVX2-SLOW-LABEL: trunc_or_const_v16i64_v16i8:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm3, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm3[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm4[0,2],ymm2[4,6],ymm4[4,6]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm2, %ymm2
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm5
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm5[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm5[0,2],ymm0[4,6],ymm5[4,6]
; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: vpand %xmm4, %xmm0, %xmm0


@@ -899,25 +899,25 @@ define <8 x i32> @trunc_packus_v8i64_v8i32(<8 x i64>* %p0) "min-legal-vector-wid
; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [4294967295,4294967295]
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm8
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm5
; AVX1-NEXT: vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vpxor %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm0, %xmm9
; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm7
; AVX1-NEXT: vblendvpd %xmm6, %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm6
; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm7
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm6
; AVX1-NEXT: vblendvpd %xmm8, %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm4
; AVX1-NEXT: vpand %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpand %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; AVX1-NEXT: vpand %xmm1, %xmm7, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vpand %xmm2, %xmm7, %xmm2
; AVX1-NEXT: vpand %xmm0, %xmm9, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_packus_v8i64_v8i32:
@@ -925,20 +925,18 @@ define <8 x i32> @trunc_packus_v8i64_v8i32(<8 x i64>* %p0) "min-legal-vector-wid
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
; AVX2-SLOW-NEXT: vpand %ymm0, %ymm3, %ymm0
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm2
; AVX2-SLOW-NEXT: vpand %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
; AVX2-SLOW-NEXT: vpand %ymm1, %ymm3, %ymm1
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm2
; AVX2-SLOW-NEXT: vpand %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_packus_v8i64_v8i32:


@@ -905,25 +905,25 @@ define <8 x i32> @trunc_ssat_v8i64_v8i32(<8 x i64>* %p0) "min-legal-vector-width
; AVX1-NEXT: vmovdqa 48(%rdi), %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [2147483647,2147483647]
; AVX1-NEXT: vpcmpgtq %xmm3, %xmm4, %xmm8
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm4, %xmm6
; AVX1-NEXT: vpcmpgtq %xmm2, %xmm4, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm0, %xmm4, %xmm5
; AVX1-NEXT: vblendvpd %xmm5, %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [18446744071562067968,18446744071562067968]
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm0, %xmm9
; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm7
; AVX1-NEXT: vblendvpd %xmm6, %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm6
; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm2, %xmm7
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm4, %xmm1
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm1, %xmm6
; AVX1-NEXT: vblendvpd %xmm8, %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm3, %xmm4
; AVX1-NEXT: vblendvpd %xmm4, %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vblendvpd %xmm6, %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vblendvpd %xmm6, %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vblendvpd %xmm9, %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_ssat_v8i64_v8i32:
@@ -931,20 +931,18 @@ define <8 x i32> @trunc_ssat_v8i64_v8i32(<8 x i64>* %p0) "min-legal-vector-width
; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [2147483647,2147483647,2147483647,2147483647]
; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vpcmpgtq %ymm0, %ymm2, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vpcmpgtq %ymm1, %ymm2, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [18446744071562067968,18446744071562067968,18446744071562067968,18446744071562067968]
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm1, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vpcmpgtq %ymm2, %ymm0, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_ssat_v8i64_v8i32:


@@ -611,20 +611,20 @@ define <8 x i32> @trunc_usat_v8i64_v8i32(<8 x i64>* %p0) {
; AVX1-NEXT: vpxor %xmm4, %xmm0, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [9223372041149743103,9223372041149743103]
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm8
; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm7
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm7
; AVX1-NEXT: vpcmpgtq %xmm7, %xmm6, %xmm7
; AVX1-NEXT: vpxor %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vpxor %xmm4, %xmm1, %xmm5
; AVX1-NEXT: vpcmpgtq %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm4
; AVX1-NEXT: vpcmpgtq %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vmovapd {{.*#+}} xmm6 = [4294967295,4294967295]
; AVX1-NEXT: vblendvpd %xmm4, %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vblendvpd %xmm5, %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; AVX1-NEXT: vblendvpd %xmm7, %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vblendvpd %xmm5, %xmm1, %xmm6, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vblendvpd %xmm7, %xmm2, %xmm6, %xmm2
; AVX1-NEXT: vblendvpd %xmm8, %xmm0, %xmm6, %xmm0
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm1[0,2],ymm0[4,6],ymm1[4,6]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc_usat_v8i64_v8i32:
@@ -633,18 +633,16 @@ define <8 x i32> @trunc_usat_v8i64_v8i32(<8 x i64>* %p0) {
; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1
; AVX2-SLOW-NEXT: vbroadcastsd {{.*#+}} ymm2 = [4294967295,4294967295,4294967295,4294967295]
; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm3 = [9223372036854775808,9223372036854775808,9223372036854775808,9223372036854775808]
; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm0, %ymm4
; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm1, %ymm4
; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [9223372041149743103,9223372041149743103,9223372041149743103,9223372041149743103]
; AVX2-SLOW-NEXT: vpcmpgtq %ymm4, %ymm5, %ymm4
; AVX2-SLOW-NEXT: vblendvpd %ymm4, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm1, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm4, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vpxor %ymm3, %ymm0, %ymm3
; AVX2-SLOW-NEXT: vpcmpgtq %ymm3, %ymm5, %ymm3
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm1, %ymm2, %ymm1
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vblendvpd %ymm3, %ymm0, %ymm2, %ymm0
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc_usat_v8i64_v8i32:


@@ -20,20 +20,16 @@ define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
;
; AVX1-LABEL: trunc8i64_8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i32:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i32:
@@ -63,20 +59,16 @@ define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
;
; AVX1-LABEL: trunc8i64_8i32_ashr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i32_ashr:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i32_ashr:
@@ -108,22 +100,18 @@ define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
;
; AVX1-LABEL: trunc8i64_8i32_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[1,3],ymm2[1,3],ymm0[5,7],ymm2[5,7]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i32_lshr:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i32_lshr:
@@ -205,11 +193,9 @@ define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
;
; AVX2-SLOW-LABEL: trunc8i64_8i16:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -1361,20 +1347,16 @@ define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
;
; AVX1-LABEL: trunc2x4i64_8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc2x4i64_8i32:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm0 = ymm0[0,2],ymm2[0,2],ymm0[4,6],ymm2[4,6]
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc2x4i64_8i32: