[X86] combineMulToPMADDWD - don't bitcast the source ops before splitting to ensure we split the build vectors early

Fixes a regression from D127115: splitting was creating extract_subvector(bitcast(build_vector())) patterns, which prevented the build vectors from being split before being bitcast to vXi16 types, resulting in various issues with further folding of the (now legal) build vectors.
Simon Pilgrim 2022-06-10 13:29:04 +01:00
parent 12ccdd67aa
commit 5acbb2dda2
2 changed files with 24 additions and 24 deletions
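
For context, the multiplies this combine targets look like the reduced IR below (a hypothetical sketch modelled on the test file's test_mul_v8i32_v8i8, not taken from the commit): both operands are known to fit in 16 bits, so the vXi32 multiply can be lowered as PMADDWD on operands bitcast to vXi16.

; Hypothetical reduced example: %A zero-extends to <8 x i32>, and 18778 fits
; in 16 bits, so the multiply is a candidate for combineMulToPMADDWD.
define <8 x i32> @mul_v8i32_v8i8(<8 x i8> %A) {
  %z = zext <8 x i8> %A to <8 x i32>
  %m = mul nuw nsw <8 x i32> %z, <i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778, i32 18778>
  ret <8 x i32> %m
}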

llvm/lib/Target/X86/X86ISelLowering.cpp

@@ -45709,8 +45709,6 @@ static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
   if (NumElts == 1 || !isPowerOf2_32(NumElts))
     return SDValue();
 
-  EVT WVT = EVT::getVectorVT(*DAG.getContext(), MVT::i16, 2 * NumElts);
-
   // With AVX512 but without BWI, we would need to split v32i16.
   if (32 <= (2 * NumElts) && Subtarget.hasAVX512() && !Subtarget.hasBWI())
     return SDValue();
@@ -45793,11 +45791,13 @@ static SDValue combineMulToPMADDWD(SDNode *N, SelectionDAG &DAG,
   // Use SplitOpsAndApply to handle AVX splitting.
   auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                            ArrayRef<SDValue> Ops) {
-    MVT OpVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
-    return DAG.getNode(X86ISD::VPMADDWD, DL, OpVT, Ops);
+    MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
+    MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
+    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
+                       DAG.getBitcast(OpVT, Ops[0]),
+                       DAG.getBitcast(OpVT, Ops[1]));
   };
-  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT,
-                          { DAG.getBitcast(WVT, N0), DAG.getBitcast(WVT, N1) },
+  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
                           PMADDWDBuilder);
 }
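
For readability, here is the post-patch lowering reassembled from the hunks above (same code as the diff; the comments are editorial): SplitOpsAndApply now receives the original vXi32 ops, so any build_vector sources are split while still legal vXi32 values, and each half is only bitcast to vXi16 inside the builder.

  auto PMADDWDBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
                           ArrayRef<SDValue> Ops) {
    // The result stays vXi32, but each (already split) op is reinterpreted
    // as vXi16 here, after splitting, rather than before SplitOpsAndApply.
    MVT ResVT = MVT::getVectorVT(MVT::i32, Ops[0].getValueSizeInBits() / 32);
    MVT OpVT = MVT::getVectorVT(MVT::i16, Ops[0].getValueSizeInBits() / 16);
    return DAG.getNode(X86ISD::VPMADDWD, DL, ResVT,
                       DAG.getBitcast(OpVT, Ops[0]),
                       DAG.getBitcast(OpVT, Ops[1]));
  };
  return SplitOpsAndApply(DAG, Subtarget, SDLoc(N), VT, {N0, N1},
                          PMADDWDBuilder);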

llvm/test/CodeGen/X86/slow-pmulld.ll

@@ -101,7 +101,7 @@ define <4 x i32> @test_mul_v4i32_v4i8(<4 x i8> %A) {
 define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
 ; SLM-LABEL: test_mul_v8i32_v8i8:
 ; SLM: # %bb.0:
-; SLM-NEXT: movdqa {{.*#+}} xmm2 = <18778,u,18778,u,18778,u,18778,u>
+; SLM-NEXT: movdqa {{.*#+}} xmm2 = [18778,0,18778,0,18778,0,18778,0]
 ; SLM-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SLM-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SLM-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -114,7 +114,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
 ; SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SLOW-NEXT: movdqa {{.*#+}} xmm2 = <18778,u,18778,u,18778,u,18778,u>
+; SLOW-NEXT: movdqa {{.*#+}} xmm2 = [18778,0,18778,0,18778,0,18778,0]
 ; SLOW-NEXT: pmaddwd %xmm2, %xmm0
 ; SLOW-NEXT: pmaddwd %xmm2, %xmm1
 ; SLOW-NEXT: ret{{[l|q]}}
@@ -124,7 +124,7 @@ define <8 x i32> @test_mul_v8i32_v8i8(<8 x i8> %A) {
 ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-NEXT: movdqa {{.*#+}} xmm2 = <18778,u,18778,u,18778,u,18778,u>
+; SSE4-NEXT: movdqa {{.*#+}} xmm2 = [18778,0,18778,0,18778,0,18778,0]
 ; SSE4-NEXT: pmaddwd %xmm2, %xmm0
 ; SSE4-NEXT: pmaddwd %xmm2, %xmm1
 ; SSE4-NEXT: ret{{[l|q]}}
@@ -199,7 +199,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
 ; SLM-LABEL: test_mul_v16i32_v16i8:
 ; SLM: # %bb.0:
 ; SLM-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SLM-NEXT: movdqa {{.*#+}} xmm5 = <18778,u,18778,u,18778,u,18778,u>
+; SLM-NEXT: movdqa {{.*#+}} xmm5 = [18778,0,18778,0,18778,0,18778,0]
 ; SLM-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
 ; SLM-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SLM-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
@@ -221,7 +221,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
 ; SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SLOW-NEXT: movdqa {{.*#+}} xmm4 = <18778,u,18778,u,18778,u,18778,u>
+; SLOW-NEXT: movdqa {{.*#+}} xmm4 = [18778,0,18778,0,18778,0,18778,0]
 ; SLOW-NEXT: pmaddwd %xmm4, %xmm0
 ; SLOW-NEXT: pmaddwd %xmm4, %xmm1
 ; SLOW-NEXT: pmaddwd %xmm4, %xmm2
@@ -237,7 +237,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
 ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-NEXT: movdqa {{.*#+}} xmm4 = <18778,u,18778,u,18778,u,18778,u>
+; SSE4-NEXT: movdqa {{.*#+}} xmm4 = [18778,0,18778,0,18778,0,18778,0]
 ; SSE4-NEXT: pmaddwd %xmm4, %xmm0
 ; SSE4-NEXT: pmaddwd %xmm4, %xmm1
 ; SSE4-NEXT: pmaddwd %xmm4, %xmm2
@@ -249,7 +249,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-SLOW-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
 ; AVX2-SLOW-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = <18778,u,18778,u,18778,u,18778,u,18778,u,18778,u,18778,u,18778,u>
+; AVX2-SLOW-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
 ; AVX2-SLOW-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT: vpmaddwd %ymm2, %ymm1, %ymm1
 ; AVX2-SLOW-NEXT: ret{{[l|q]}}
@@ -259,7 +259,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
 ; AVX2-32-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
 ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-32-NEXT: vmovdqa {{.*#+}} ymm2 = <18778,u,18778,u,18778,u,18778,u,18778,u,18778,u,18778,u,18778,u>
+; AVX2-32-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
 ; AVX2-32-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
 ; AVX2-32-NEXT: vpmaddwd %ymm2, %ymm1, %ymm1
 ; AVX2-32-NEXT: retl
@@ -269,7 +269,7 @@ define <16 x i32> @test_mul_v16i32_v16i8(<16 x i8> %A) {
 ; AVX2-64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
 ; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-64-NEXT: vmovdqa {{.*#+}} ymm2 = <18778,u,18778,u,18778,u,18778,u,18778,u,18778,u,18778,u,18778,u>
+; AVX2-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
 ; AVX2-64-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
 ; AVX2-64-NEXT: vpmaddwd %ymm2, %ymm1, %ymm1
 ; AVX2-64-NEXT: retq
@@ -625,7 +625,7 @@ define <4 x i32> @test_mul_v4i32_v4i8_minsize(<4 x i8> %A) minsize {
 define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
 ; SLM-LABEL: test_mul_v8i32_v8i8_minsize:
 ; SLM: # %bb.0:
-; SLM-NEXT: movdqa {{.*#+}} xmm2 = <18778,u,18778,u,18778,u,18778,u>
+; SLM-NEXT: movdqa {{.*#+}} xmm2 = [18778,0,18778,0,18778,0,18778,0]
 ; SLM-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SLM-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
 ; SLM-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
@@ -638,7 +638,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
 ; SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SLOW-NEXT: movdqa {{.*#+}} xmm2 = <18778,u,18778,u,18778,u,18778,u>
+; SLOW-NEXT: movdqa {{.*#+}} xmm2 = [18778,0,18778,0,18778,0,18778,0]
 ; SLOW-NEXT: pmaddwd %xmm2, %xmm0
 ; SLOW-NEXT: pmaddwd %xmm2, %xmm1
 ; SLOW-NEXT: ret{{[l|q]}}
@@ -648,7 +648,7 @@ define <8 x i32> @test_mul_v8i32_v8i8_minsize(<8 x i8> %A) minsize {
 ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-NEXT: movdqa {{.*#+}} xmm2 = <18778,u,18778,u,18778,u,18778,u>
+; SSE4-NEXT: movdqa {{.*#+}} xmm2 = [18778,0,18778,0,18778,0,18778,0]
 ; SSE4-NEXT: pmaddwd %xmm2, %xmm0
 ; SSE4-NEXT: pmaddwd %xmm2, %xmm1
 ; SSE4-NEXT: ret{{[l|q]}}
@@ -723,7 +723,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 ; SLM-LABEL: test_mul_v16i32_v16i8_minsize:
 ; SLM: # %bb.0:
 ; SLM-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,3,3,3]
-; SLM-NEXT: movdqa {{.*#+}} xmm5 = <18778,u,18778,u,18778,u,18778,u>
+; SLM-NEXT: movdqa {{.*#+}} xmm5 = [18778,0,18778,0,18778,0,18778,0]
 ; SLM-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
 ; SLM-NEXT: pmovzxbd {{.*#+}} xmm3 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SLM-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
@@ -745,7 +745,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 ; SLOW-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SLOW-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SLOW-NEXT: movdqa {{.*#+}} xmm4 = <18778,u,18778,u,18778,u,18778,u>
+; SLOW-NEXT: movdqa {{.*#+}} xmm4 = [18778,0,18778,0,18778,0,18778,0]
 ; SLOW-NEXT: pmaddwd %xmm4, %xmm0
 ; SLOW-NEXT: pmaddwd %xmm4, %xmm1
 ; SLOW-NEXT: pmaddwd %xmm4, %xmm2
@@ -761,7 +761,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 ; SSE4-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero
 ; SSE4-NEXT: pmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
-; SSE4-NEXT: movdqa {{.*#+}} xmm4 = <18778,u,18778,u,18778,u,18778,u>
+; SSE4-NEXT: movdqa {{.*#+}} xmm4 = [18778,0,18778,0,18778,0,18778,0]
 ; SSE4-NEXT: pmaddwd %xmm4, %xmm0
 ; SSE4-NEXT: pmaddwd %xmm4, %xmm1
 ; SSE4-NEXT: pmaddwd %xmm4, %xmm2
@@ -773,7 +773,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-SLOW-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
 ; AVX2-SLOW-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-SLOW-NEXT: vpbroadcastw {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-SLOW-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
 ; AVX2-SLOW-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
 ; AVX2-SLOW-NEXT: vpmaddwd %ymm2, %ymm1, %ymm1
 ; AVX2-SLOW-NEXT: ret{{[l|q]}}
@@ -783,7 +783,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 ; AVX2-32-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
 ; AVX2-32-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-32-NEXT: vpbroadcastw {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-32-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
 ; AVX2-32-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
 ; AVX2-32-NEXT: vpmaddwd %ymm2, %ymm1, %ymm1
 ; AVX2-32-NEXT: retl
@@ -793,7 +793,7 @@ define <16 x i32> @test_mul_v16i32_v16i8_minsize(<16 x i8> %A) minsize {
 ; AVX2-64-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,2,3]
 ; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
 ; AVX2-64-NEXT: vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
-; AVX2-64-NEXT: vpbroadcastw {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778,18778]
+; AVX2-64-NEXT: vpbroadcastd {{.*#+}} ymm2 = [18778,18778,18778,18778,18778,18778,18778,18778]
 ; AVX2-64-NEXT: vpmaddwd %ymm2, %ymm0, %ymm0
 ; AVX2-64-NEXT: vpmaddwd %ymm2, %ymm1, %ymm1
 ; AVX2-64-NEXT: retq