X86 Tests: add more pmaddwd cases. NFC

Improve coverage of D41811
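
The new functions deliberately jumble the shuffle indices so the strict even/odd lane pairing required for PMADDWD is broken, and the combine from D41811 must therefore leave them alone. For contrast, below is a minimal sketch of the in-order pattern the combine is meant to match, modeled on the existing positive tests in this file (the function name pmaddwd_sketch is hypothetical):

define <4 x i32> @pmaddwd_sketch(<8 x i16> %A, <8 x i16> %B) {
  ; Sign-extend both operands to 32 bits and multiply element-wise.
  %exta = sext <8 x i16> %A to <8 x i32>
  %extb = sext <8 x i16> %B to <8 x i32>
  %m = mul <8 x i32> %exta, %extb
  ; In-order even/odd lanes: adding them sums adjacent products,
  ; which is exactly what pmaddwd computes.
  %even = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %odd = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %ret = add <4 x i32> %even, %odd
  ret <4 x i32> %ret
}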

llvm-svn: 322434
Zvi Rackover 2018-01-13 08:21:29 +00:00
parent 10a02577ea
commit 63f1f322c9
1 changed file with 482 additions and 0 deletions


@@ -635,3 +635,485 @@ define <4 x i32> @pmaddwd_negative2(<8 x i16> %A) {
%ret = add <4 x i32> %odd, %even
ret <4 x i32> %ret
}
define <4 x i32> @jumbled_indices4(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: jumbled_indices4:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: pmulhw %xmm1, %xmm2
; SSE2-NEXT: pmullw %xmm1, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,3]
; SSE2-NEXT: paddd %xmm2, %xmm0
; SSE2-NEXT: retq
;
; AVX-LABEL: jumbled_indices4:
; AVX: # %bb.0:
; AVX-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX-NEXT: vpmovsxwd %xmm1, %ymm1
; AVX-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vshufps {{.*#+}} xmm2 = xmm0[3,1],xmm1[1,2]
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[0,3]
; AVX-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
%exta = sext <8 x i16> %A to <8 x i32>
%extb = sext <8 x i16> %B to <8 x i32>
%m = mul <8 x i32> %exta, %extb
%sa = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 3, i32 1, i32 5, i32 6>
%sb = shufflevector <8 x i32> %m, <8 x i32> undef, <4 x i32> <i32 2, i32 0, i32 4, i32 7>
%a = add <4 x i32> %sa, %sb
ret <4 x i32> %a
}
define <8 x i32> @jumbled_indices8(<16 x i16> %A, <16 x i16> %B) {
; SSE2-LABEL: jumbled_indices8:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: pmulhw %xmm2, %xmm4
; SSE2-NEXT: pmullw %xmm2, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pmulhw %xmm3, %xmm4
; SSE2-NEXT: pmullw %xmm3, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm3
; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,0],xmm3[3,0]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm2[3,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,1],xmm3[2,1]
; SSE2-NEXT: paddd %xmm4, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[2,1]
; SSE2-NEXT: paddd %xmm5, %xmm0
; SSE2-NEXT: retq
;
; AVX2-LABEL: jumbled_indices8:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm3
; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
; AVX2-NEXT: vpmulld %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
; AVX2-NEXT: vpmulld %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm2[3,0,2,3,7,4,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,2]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[0,2,3,0,4,6,7,4]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,1,2,3,6,5,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,2]
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,1,5,7,6,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: jumbled_indices8:
; AVX512: # %bb.0:
; AVX512-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512-NEXT: vpshufd {{.*#+}} ymm2 = ymm1[3,0,2,3,7,4,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,2]
; AVX512-NEXT: vpshufd {{.*#+}} ymm3 = ymm0[0,2,3,0,4,6,7,4]
; AVX512-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,3,2,3]
; AVX512-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX512-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,1,2,3,6,5,6,7]
; AVX512-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,2]
; AVX512-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[1,3,2,1,5,7,6,5]
; AVX512-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,3,2,3]
; AVX512-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX512-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; AVX512-NEXT: retq
%exta = sext <16 x i16> %A to <16 x i32>
%extb = sext <16 x i16> %B to <16 x i32>
%m = mul <16 x i32> %exta, %extb
%sa = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 0, i32 2, i32 7, i32 4, i32 11, i32 8, i32 15, i32 12>
%sb = shufflevector <16 x i32> %m, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 6, i32 5, i32 10, i32 9, i32 14, i32 13>
%a = add <8 x i32> %sa, %sb
ret <8 x i32> %a
}
define <16 x i32> @jumbled_indices16(<32 x i16> %A, <32 x i16> %B) {
; SSE2-LABEL: jumbled_indices16:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm0, %xmm8
; SSE2-NEXT: pmulhw %xmm4, %xmm8
; SSE2-NEXT: pmullw %xmm4, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3]
; SSE2-NEXT: movdqa %xmm1, %xmm4
; SSE2-NEXT: pmulhw %xmm5, %xmm4
; SSE2-NEXT: pmullw %xmm5, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm8
; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm4[4],xmm8[5],xmm4[5],xmm8[6],xmm4[6],xmm8[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm2, %xmm4
; SSE2-NEXT: pmulhw %xmm6, %xmm4
; SSE2-NEXT: pmullw %xmm6, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: pmulhw %xmm7, %xmm4
; SSE2-NEXT: pmullw %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm4[4],xmm7[5],xmm4[5],xmm7[6],xmm4[6],xmm7[7],xmm4[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; SSE2-NEXT: movdqa %xmm3, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,0],xmm7[3,1]
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,2],xmm6[0,3]
; SSE2-NEXT: movdqa %xmm1, %xmm10
; SSE2-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm8[3,0]
; SSE2-NEXT: movdqa %xmm0, %xmm11
; SSE2-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm9[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,1],xmm7[2,0]
; SSE2-NEXT: paddd %xmm4, %xmm3
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm6[1,2]
; SSE2-NEXT: paddd %xmm5, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm8[2,1]
; SSE2-NEXT: paddd %xmm10, %xmm1
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm9[0,3]
; SSE2-NEXT: paddd %xmm11, %xmm0
; SSE2-NEXT: retq
;
; AVX2-LABEL: jumbled_indices16:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm4
; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4
; AVX2-NEXT: vpmovsxwd %xmm1, %ymm1
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm6
; AVX2-NEXT: vpmovsxwd %xmm6, %ymm6
; AVX2-NEXT: vpmulld %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vpmovsxwd %xmm3, %ymm3
; AVX2-NEXT: vpmulld %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpmovsxwd %xmm2, %ymm3
; AVX2-NEXT: vpmulld %ymm3, %ymm5, %ymm3
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2
; AVX2-NEXT: vpmovsxwd %xmm2, %ymm2
; AVX2-NEXT: vpmulld %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm4[3,0,3,1,7,4,7,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm1[1,2,0,3,5,6,4,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm5 = ymm3[0],ymm0[1],ymm3[2],ymm0[3,4],ymm3[5,6],ymm0[7]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [2,0,5,6,3,1,7,4]
; AVX2-NEXT: vpermd %ymm5, %ymm6, %ymm5
; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,1,2,0,6,5,6,4]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,0,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,3,1,2,4,7,5,6]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5,6,7]
; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3,4],ymm0[5,6],ymm3[7]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [3,1,4,7,2,0,6,5]
; AVX2-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vpaddd %ymm0, %ymm5, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: jumbled_indices16:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpmulld %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm2
; AVX512F-NEXT: vpmulld %zmm2, %zmm1, %zmm1
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [2,0,5,6,11,9,15,12,17,18,20,23,27,24,31,29]
; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm2
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,1,4,7,10,8,14,13,16,19,21,22,26,25,30,28]
; AVX512F-NEXT: vpermi2d %zmm1, %zmm0, %zmm3
; AVX512F-NEXT: vpaddd %zmm3, %zmm2, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: jumbled_indices16:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm2
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm3
; AVX512BW-NEXT: vpmulld %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512BW-NEXT: vpmulld %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm1 = [2,0,5,6,11,9,15,12,17,18,20,23,27,24,31,29]
; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm1
; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm3 = [3,1,4,7,10,8,14,13,16,19,21,22,26,25,30,28]
; AVX512BW-NEXT: vpermi2d %zmm0, %zmm2, %zmm3
; AVX512BW-NEXT: vpaddd %zmm3, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%exta = sext <32 x i16> %A to <32 x i32>
%extb = sext <32 x i16> %B to <32 x i32>
%m = mul <32 x i32> %exta, %extb
%sa = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 2, i32 0, i32 5, i32 6, i32 11, i32 9, i32 15, i32 12, i32 17, i32 18, i32 20, i32 23, i32 27, i32 24, i32 31, i32 29>
%sb = shufflevector <32 x i32> %m, <32 x i32> undef, <16 x i32> <i32 3, i32 1, i32 4, i32 7, i32 10, i32 8, i32 14, i32 13, i32 16, i32 19, i32 21, i32 22, i32 26, i32 25, i32 30, i32 28>
%a = add <16 x i32> %sa, %sb
ret <16 x i32> %a
}
define <32 x i32> @jumbled_indices32(<64 x i16> %A, <64 x i16> %B) {
; SSE2-LABEL: jumbled_indices32:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa %xmm7, %xmm8
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm12
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm14
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm13
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm7
; SSE2-NEXT: movdqa %xmm0, %xmm15
; SSE2-NEXT: pmulhw %xmm7, %xmm15
; SSE2-NEXT: pmullw %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm15[4],xmm7[5],xmm15[5],xmm7[6],xmm15[6],xmm7[7],xmm15[7]
; SSE2-NEXT: movdqa %xmm7, -{{[0-9]+}}(%rsp) # 16-byte Spill
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm15[0],xmm0[1],xmm15[1],xmm0[2],xmm15[2],xmm0[3],xmm15[3]
; SSE2-NEXT: movdqa %xmm1, %xmm15
; SSE2-NEXT: pmulhw %xmm9, %xmm15
; SSE2-NEXT: pmullw %xmm9, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm9
; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm15[4],xmm9[5],xmm15[5],xmm9[6],xmm15[6],xmm9[7],xmm15[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm15[0],xmm1[1],xmm15[1],xmm1[2],xmm15[2],xmm1[3],xmm15[3]
; SSE2-NEXT: movdqa %xmm2, %xmm15
; SSE2-NEXT: pmulhw %xmm10, %xmm15
; SSE2-NEXT: pmullw %xmm10, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm10
; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm15[4],xmm10[5],xmm15[5],xmm10[6],xmm15[6],xmm10[7],xmm15[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3]
; SSE2-NEXT: movdqa %xmm3, %xmm15
; SSE2-NEXT: pmulhw %xmm11, %xmm15
; SSE2-NEXT: pmullw %xmm11, %xmm3
; SSE2-NEXT: movdqa %xmm3, %xmm11
; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm15[4],xmm11[5],xmm15[5],xmm11[6],xmm15[6],xmm11[7],xmm15[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSE2-NEXT: movdqa %xmm4, %xmm15
; SSE2-NEXT: pmulhw %xmm13, %xmm15
; SSE2-NEXT: pmullw %xmm13, %xmm4
; SSE2-NEXT: movdqa %xmm4, %xmm13
; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm15[4],xmm13[5],xmm15[5],xmm13[6],xmm15[6],xmm13[7],xmm15[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm15[0],xmm4[1],xmm15[1],xmm4[2],xmm15[2],xmm4[3],xmm15[3]
; SSE2-NEXT: movdqa %xmm5, %xmm15
; SSE2-NEXT: pmulhw %xmm14, %xmm15
; SSE2-NEXT: pmullw %xmm14, %xmm5
; SSE2-NEXT: movdqa %xmm5, %xmm14
; SSE2-NEXT: punpckhwd {{.*#+}} xmm14 = xmm14[4],xmm15[4],xmm14[5],xmm15[5],xmm14[6],xmm15[6],xmm14[7],xmm15[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm15[0],xmm5[1],xmm15[1],xmm5[2],xmm15[2],xmm5[3],xmm15[3]
; SSE2-NEXT: movdqa %xmm6, %xmm15
; SSE2-NEXT: pmulhw %xmm12, %xmm15
; SSE2-NEXT: pmullw %xmm12, %xmm6
; SSE2-NEXT: movdqa %xmm6, %xmm12
; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm15[4],xmm12[5],xmm15[5],xmm12[6],xmm15[6],xmm12[7],xmm15[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
; SSE2-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm15
; SSE2-NEXT: movdqa %xmm8, %xmm7
; SSE2-NEXT: pmulhw %xmm15, %xmm7
; SSE2-NEXT: pmullw %xmm15, %xmm8
; SSE2-NEXT: movdqa %xmm8, %xmm15
; SSE2-NEXT: punpckhwd {{.*#+}} xmm15 = xmm15[4],xmm7[4],xmm15[5],xmm7[5],xmm15[6],xmm7[6],xmm15[7],xmm7[7]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; SSE2-NEXT: movdqa %xmm8, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,0],xmm15[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,1],xmm15[0,2]
; SSE2-NEXT: paddd %xmm7, %xmm8
; SSE2-NEXT: movdqa %xmm6, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm12[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm12[1,3]
; SSE2-NEXT: paddd %xmm7, %xmm6
; SSE2-NEXT: movdqa %xmm5, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm14[1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm14[0,2]
; SSE2-NEXT: paddd %xmm7, %xmm5
; SSE2-NEXT: movdqa %xmm4, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,2],xmm13[1,2]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm13[0,3]
; SSE2-NEXT: paddd %xmm7, %xmm4
; SSE2-NEXT: movdqa %xmm3, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,3],xmm11[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm11[3,1]
; SSE2-NEXT: paddd %xmm7, %xmm3
; SSE2-NEXT: movdqa %xmm2, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,1],xmm10[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm10[3,1]
; SSE2-NEXT: paddd %xmm7, %xmm2
; SSE2-NEXT: movdqa %xmm1, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm9[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm9[3,1]
; SSE2-NEXT: paddd %xmm7, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm7
; SSE2-NEXT: movaps -{{[0-9]+}}(%rsp), %xmm9 # 16-byte Reload
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,2],xmm9[2,1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,3],xmm9[3,0]
; SSE2-NEXT: paddd %xmm7, %xmm0
; SSE2-NEXT: movdqa %xmm8, 112(%rdi)
; SSE2-NEXT: movdqa %xmm6, 96(%rdi)
; SSE2-NEXT: movdqa %xmm5, 80(%rdi)
; SSE2-NEXT: movdqa %xmm4, 64(%rdi)
; SSE2-NEXT: movdqa %xmm3, 48(%rdi)
; SSE2-NEXT: movdqa %xmm2, 32(%rdi)
; SSE2-NEXT: movdqa %xmm1, 16(%rdi)
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: movq %rdi, %rax
; SSE2-NEXT: retq
;
; AVX2-LABEL: jumbled_indices32:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm8
; AVX2-NEXT: vpmovsxwd %xmm8, %ymm8
; AVX2-NEXT: vpmovsxwd %xmm3, %ymm9
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3
; AVX2-NEXT: vpmovsxwd %xmm3, %ymm10
; AVX2-NEXT: vpmovsxwd %xmm2, %ymm11
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpmovsxwd %xmm2, %ymm12
; AVX2-NEXT: vpmovsxwd %xmm1, %ymm13
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmovsxwd %xmm1, %ymm14
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm15
; AVX2-NEXT: vextracti128 $1, %ymm7, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm8, %ymm3
; AVX2-NEXT: vpmovsxwd %xmm7, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm9, %ymm7
; AVX2-NEXT: vextracti128 $1, %ymm6, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm10, %ymm2
; AVX2-NEXT: vpmovsxwd %xmm6, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm11, %ymm6
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm12, %ymm1
; AVX2-NEXT: vpmovsxwd %xmm5, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm13, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm0
; AVX2-NEXT: vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT: vpmulld %ymm0, %ymm14, %ymm0
; AVX2-NEXT: vpmovsxwd %xmm4, %ymm4
; AVX2-NEXT: vpmulld %ymm4, %ymm15, %ymm4
; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm3[3,0,1,3,7,4,5,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,0,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm7[2,0,0,2,6,4,4,6]
; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm9[0,1,2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm2[1,3,2,3,5,7,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,1,0,2]
; AVX2-NEXT: vpshufd {{.*#+}} ymm10 = ymm6[0,2,1,2,4,6,5,6]
; AVX2-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm10 = ymm1[1,3,2,0,5,7,6,4]
; AVX2-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,1,0,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm11 = ymm5[3,1,2,0,7,5,6,4]
; AVX2-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm10 = ymm11[0,1,2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm11 = ymm0[2,0,2,3,6,4,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,1,0,2]
; AVX2-NEXT: vpshufd {{.*#+}} ymm12 = ymm4[1,2,2,1,5,6,6,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5,6,7]
; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[2,1,0,2,6,5,4,6]
; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[3,1,1,3,7,5,5,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm7[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vpaddd %ymm3, %ymm8, %ymm3
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[0,2,2,3,4,6,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,1,0,2]
; AVX2-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[1,3,0,3,5,7,4,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpaddd %ymm2, %ymm9, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,3,1,4,6,7,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,0,3]
; AVX2-NEXT: vpshufd {{.*#+}} ymm5 = ymm5[2,0,3,1,6,4,7,5]
; AVX2-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vpaddd %ymm1, %ymm10, %ymm1
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,1,2,3,7,5,6,7]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,0,2]
; AVX2-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[0,3,3,0,4,7,7,4]
; AVX2-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,3,2,3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm4[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vpaddd %ymm0, %ymm11, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: jumbled_indices32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512F-NEXT: vpmovsxwd %ymm3, %zmm3
; AVX512F-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512F-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512F-NEXT: vpmovsxwd %ymm6, %zmm6
; AVX512F-NEXT: vpmulld %zmm6, %zmm2, %zmm2
; AVX512F-NEXT: vpmovsxwd %ymm7, %zmm6
; AVX512F-NEXT: vpmulld %zmm6, %zmm3, %zmm3
; AVX512F-NEXT: vpmovsxwd %ymm4, %zmm4
; AVX512F-NEXT: vpmulld %zmm4, %zmm0, %zmm0
; AVX512F-NEXT: vpmovsxwd %ymm5, %zmm4
; AVX512F-NEXT: vpmulld %zmm4, %zmm1, %zmm4
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm1 = [0,2,5,6,9,11,13,15,18,16,20,22,27,24,29,31]
; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm1
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm5 = [1,2,6,5,10,8,14,12,19,17,22,20,25,27,30,28]
; AVX512F-NEXT: vpermi2d %zmm4, %zmm0, %zmm5
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm6 = [1,3,4,7,8,10,12,14,19,17,21,23,26,25,28,30]
; AVX512F-NEXT: vpermi2d %zmm3, %zmm2, %zmm6
; AVX512F-NEXT: vpaddd %zmm6, %zmm1, %zmm1
; AVX512F-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,3,7,4,11,9,15,13,18,16,23,21,24,26,31,29]
; AVX512F-NEXT: vpermi2d %zmm4, %zmm0, %zmm2
; AVX512F-NEXT: vpaddd %zmm2, %zmm5, %zmm0
; AVX512F-NEXT: retq
;
; AVX512BW-LABEL: jumbled_indices32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm4
; AVX512BW-NEXT: vextracti64x4 $1, %zmm1, %ymm1
; AVX512BW-NEXT: vpmovsxwd %ymm1, %zmm1
; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm5
; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm0
; AVX512BW-NEXT: vpmovsxwd %ymm0, %zmm0
; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm6
; AVX512BW-NEXT: vpmulld %zmm6, %zmm4, %zmm4
; AVX512BW-NEXT: vextracti64x4 $1, %zmm3, %ymm3
; AVX512BW-NEXT: vpmovsxwd %ymm3, %zmm3
; AVX512BW-NEXT: vpmulld %zmm3, %zmm1, %zmm1
; AVX512BW-NEXT: vpmovsxwd %ymm2, %zmm3
; AVX512BW-NEXT: vpmulld %zmm3, %zmm5, %zmm3
; AVX512BW-NEXT: vextracti64x4 $1, %zmm2, %ymm2
; AVX512BW-NEXT: vpmovsxwd %ymm2, %zmm2
; AVX512BW-NEXT: vpmulld %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,2,5,6,9,11,13,15,18,16,20,22,27,24,29,31]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm4, %zmm2
; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm5 = [1,2,6,5,10,8,14,12,19,17,22,20,25,27,30,28]
; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm5
; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm6 = [1,3,4,7,8,10,12,14,19,17,21,23,26,25,28,30]
; AVX512BW-NEXT: vpermi2d %zmm1, %zmm4, %zmm6
; AVX512BW-NEXT: vpaddd %zmm6, %zmm2, %zmm1
; AVX512BW-NEXT: vmovdqa32 {{.*#+}} zmm2 = [0,3,7,4,11,9,15,13,18,16,23,21,24,26,31,29]
; AVX512BW-NEXT: vpermi2d %zmm0, %zmm3, %zmm2
; AVX512BW-NEXT: vpaddd %zmm2, %zmm5, %zmm0
; AVX512BW-NEXT: retq
%exta = sext <64 x i16> %A to <64 x i32>
%extb = sext <64 x i16> %B to <64 x i32>
%m = mul <64 x i32> %exta, %extb
%sa = shufflevector <64 x i32> %m, <64 x i32> undef, <32 x i32> <i32 1, i32 2, i32 6, i32 5, i32 10, i32 8, i32 14, i32 12, i32 19, i32 17, i32 22, i32 20, i32 25, i32 27, i32 30, i32 28, i32 32, i32 34, i32 37, i32 38, i32 41, i32 43, i32 45, i32 47, i32 50, i32 48, i32 52, i32 54, i32 59, i32 56, i32 61, i32 63>
%sb = shufflevector <64 x i32> %m, <64 x i32> undef, <32 x i32> <i32 0, i32 3, i32 7, i32 4, i32 11, i32 9, i32 15, i32 13, i32 18, i32 16, i32 23, i32 21, i32 24, i32 26, i32 31, i32 29, i32 33, i32 35, i32 36, i32 39, i32 40, i32 42, i32 44, i32 46, i32 51, i32 49, i32 53, i32 55, i32 58, i32 57, i32 60, i32 62>
%a = add <32 x i32> %sa, %sb
ret <32 x i32> %a
}