[X86][AVX] Attempt to fold PACK(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(PACK(X,Y)).
Truncations lowered as shuffles of multiple (concatenated) vectors often leave us with lane-crossing shuffles that feed a PACKSS/PACKUS. If both shuffles are fed from the same two vector sources, we can PACK the sources directly and shuffle the result instead. This is currently limited to whole i128 lanes in a 256-bit vector, but we can extend it if the need arises (I'm not seeing many examples in real-world code).
parent 00997d1cad
commit 77133cc1e2
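The shape of the fold can be illustrated with a minimal scalar model (a self-contained sketch, not LLVM code: the helpers pack_i32_to_u16_256, shuffle_i128_lanes and permute_i64_chunks are invented for illustration, and the masks are hard-coded for the <0,2>/<1,3> whole-lane case that the combine below handles via scaleShuffleElements): packing the two sources first and then permuting 64-bit chunks of the packed result gives the same bytes as packing the two lane-crossing shuffles.

// Minimal scalar model of PACK(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(PACK(X,Y)),
// assuming AVX2 per-128-bit-lane PACKUSDW semantics. Helper names are
// hypothetical; they are not LLVM or intrinsic APIs.
#include <array>
#include <cassert>
#include <cstdint>

using V8i32 = std::array<uint32_t, 8>;   // one 256-bit vector of i32
using V16i16 = std::array<uint16_t, 16>; // one 256-bit vector of i16

// PACKUSDW on 256 bits: each 128-bit lane packs 4 i32 from A then 4 i32 from B,
// with unsigned saturation to 16 bits.
static V16i16 pack_i32_to_u16_256(const V8i32 &A, const V8i32 &B) {
  auto Sat = [](uint32_t V) { return static_cast<uint16_t>(V > 0xFFFF ? 0xFFFF : V); };
  V16i16 R{};
  for (int Lane = 0; Lane < 2; ++Lane)
    for (int I = 0; I < 4; ++I) {
      R[Lane * 8 + I] = Sat(A[Lane * 4 + I]);
      R[Lane * 8 + 4 + I] = Sat(B[Lane * 4 + I]);
    }
  return R;
}

// Whole-128-bit-lane shuffle of the concatenation (X, Y): mask entries 0..3
// select one of the four 128-bit lanes of X:Y.
static V8i32 shuffle_i128_lanes(const V8i32 &X, const V8i32 &Y,
                                std::array<int, 2> Mask) {
  V8i32 R{};
  for (int L = 0; L < 2; ++L)
    for (int I = 0; I < 4; ++I)
      R[L * 4 + I] = (Mask[L] < 2) ? X[Mask[L] * 4 + I] : Y[(Mask[L] - 2) * 4 + I];
  return R;
}

// Permute the four 64-bit chunks of a packed 256-bit result (vpermq-style).
static V16i16 permute_i64_chunks(const V16i16 &V, std::array<int, 4> Mask) {
  V16i16 R{};
  for (int C = 0; C < 4; ++C)
    for (int I = 0; I < 4; ++I)
      R[C * 4 + I] = V[Mask[C] * 4 + I];
  return R;
}

int main() {
  V8i32 X, Y;
  for (int I = 0; I < 8; ++I) {
    X[I] = 1000 + I;
    Y[I] = 200000 + I; // saturates, to make the packing visible
  }

  // Before: PACK(SHUFFLE(X,Y,<0,2>), SHUFFLE(X,Y,<1,3>)) -- the lane-crossing
  // shuffles a concat+truncate typically lowers to.
  V16i16 Before = pack_i32_to_u16_256(shuffle_i128_lanes(X, Y, {0, 2}),
                                      shuffle_i128_lanes(X, Y, {1, 3}));

  // After: PACK(X, Y) followed by one 64-bit-chunk permute of the result,
  // whose mask is the two scaled shuffle masks appended: {0,2} ++ {1,3}.
  V16i16 After = permute_i64_chunks(pack_i32_to_u16_256(X, Y), {0, 2, 1, 3});

  assert(Before == After);
  return 0;
}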
@@ -41937,6 +41937,37 @@ static SDValue combineVectorPackWithShuffle(SDNode *N, SelectionDAG &DAG) {
    }
  }

  // Attempt to fold PACK(SHUFFLE(X,Y),SHUFFLE(X,Y)) -> SHUFFLE(PACK(X,Y)).
  // TODO: Relax shuffle scaling to support sub-128-bit subvector shuffles.
  if (VT.is256BitVector()) {
    if (auto *SVN0 = dyn_cast<ShuffleVectorSDNode>(N0)) {
      if (auto *SVN1 = dyn_cast<ShuffleVectorSDNode>(N1)) {
        SmallVector<int, 2> ShuffleMask0, ShuffleMask1;
        if (scaleShuffleElements(SVN0->getMask(), 2, ShuffleMask0) &&
            scaleShuffleElements(SVN1->getMask(), 2, ShuffleMask1)) {
          SDValue Op00 = SVN0->getOperand(0);
          SDValue Op01 = SVN0->getOperand(1);
          SDValue Op10 = SVN1->getOperand(0);
          SDValue Op11 = SVN1->getOperand(1);
          if ((Op00 == Op11) && (Op01 == Op10)) {
            std::swap(Op10, Op11);
            ShuffleVectorSDNode::commuteMask(ShuffleMask1);
          }
          if ((Op00 == Op10) && (Op01 == Op11)) {
            SmallVector<int, 4> ShuffleMask;
            ShuffleMask.append(ShuffleMask0.begin(), ShuffleMask0.end());
            ShuffleMask.append(ShuffleMask1.begin(), ShuffleMask1.end());
            SDLoc DL(N);
            SDValue Res = DAG.getNode(Opcode, DL, VT, Op00, Op01);
            Res = DAG.getBitcast(MVT::v4i64, Res);
            Res = DAG.getVectorShuffle(MVT::v4i64, DL, Res, Res, ShuffleMask);
            return DAG.getBitcast(VT, Res);
          }
        }
      }
    }
  }

  return SDValue();
}
@@ -233,9 +233,8 @@ define void @avg_v24i8(<24 x i8>* %a, <24 x i8>* %b) nounwind {
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
@@ -597,25 +596,22 @@ define void @avg_v48i8(<48 x i8>* %a, <48 x i8>* %b) nounwind {
; AVX2-NEXT: vpsubd %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vpsrld $1, %ymm5, %ymm5
; AVX2-NEXT: vpsrld $1, %ymm4, %ymm4
; AVX2-NEXT: vpackusdw %ymm4, %ymm5, %ymm4
; AVX2-NEXT: vpsrld $1, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $1, %ymm2, %ymm2
; AVX2-NEXT: vpackusdw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpsrld $1, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $1, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpackusdw %ymm6, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm6 = ymm3[2,3],ymm2[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX2-NEXT: vpackusdw %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3]
; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm2
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm0[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX2-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm5[2,3],ymm4[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm3
; AVX2-NEXT: vpackusdw %ymm2, %ymm3, %ymm2
; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm4[0,2,1,3]
; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
@@ -251,15 +251,12 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpcmpgtw %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpcmpgtw %ymm7, %ymm5, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm6, %ymm4, %ymm2
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm3 = ymm2[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX2-NEXT: vpacksswb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpacksswb %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpand %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpmovmskb %ymm0, %eax
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -369,10 +369,8 @@ define <32 x i8> @packsswb_icmp_zero_trunc_256(<16 x i16> %a0) {
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpcmpeqw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpacksswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,2,3]
; AVX2-NEXT: ret{{[l|q]}}
  %1 = icmp eq <16 x i16> %a0, zeroinitializer
  %2 = sext <16 x i1> %1 to <16 x i16>
@@ -1453,15 +1453,13 @@ define <64 x i1> @test_cmp_v64i16(<64 x i16> %a0, <64 x i16> %a1) nounwind {
; AVX2-NEXT: movq %rdi, %rax
; AVX2-NEXT: vpcmpgtw %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpcmpgtw %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpmovmskb %ymm0, %ecx
; AVX2-NEXT: vpcmpgtw %ymm7, %ymm3, %ymm0
; AVX2-NEXT: vpcmpgtw %ymm6, %ymm2, %ymm1
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm0, %ymm1, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpmovmskb %ymm0, %edx
; AVX2-NEXT: shlq $32, %rdx
; AVX2-NEXT: orq %rcx, %rdx
@@ -24,10 +24,7 @@ define <16 x i16> @trunc_concat_packssdw_256(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrad $17, %ymm0, %ymm0
; AVX2-NEXT: vpsrad $23, %ymm1, %ymm1
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_concat_packssdw_256:
@@ -64,10 +61,7 @@ define <16 x i16> @trunc_concat_packusdw_256(<8 x i32> %a0, <8 x i32> %a1) nounwind {
; AVX2-NEXT: vpsrld $17, %ymm0, %ymm0
; AVX2-NEXT: vpbroadcastd {{.*#+}} ymm2 = [15,15,15,15,15,15,15,15]
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc_concat_packusdw_256:
@@ -103,10 +97,7 @@ define <32 x i8> @trunc_concat_packsswb_256(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vpsraw $15, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpacksswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpacksswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_concat_packsswb_256:
@@ -155,10 +146,7 @@ define <32 x i8> @trunc_concat_packuswb_256(<16 x i16> %a0, <16 x i16> %a1) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vpsrlw $15, %ymm0, %ymm0
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc_concat_packuswb_256: