[x86, dag] Teach the DAG combiner to prune inputs to a vector_shuffle
that are unused.

This allows the combiner to delete math feeding shuffles where the math
isn't actually necessary. This improves some of the vperm2x128 tests that
regressed when the vector shuffle lowering started actually generating
vperm instructions rather than forcibly decomposing them.

Sadly, this isn't enough to get this *really* right because we still form
a completely unnecessary permutation. To fix that, we also need to fold
shuffles which just rearrange concatenated or inserted subvectors.

llvm-svn: 219086
commit daa1ff985c
parent 60b8662ea7
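In IR terms, the pattern being targeted looks like the sketch below (illustrative only, modeled on the new combine_unneeded_subvector1 test added in this commit; @prune_sketch is not a real test name). On AVX1 the 256-bit add is legalized into two 128-bit halves that are glued back together, and because the shuffle mask reads only elements 4..7, the combine can mark the low half unused, replace it with undef, and let the add that produced it die.

define <8 x i32> @prune_sketch(<8 x i32> %a) {
  ; Only elements 4..7 of %b are read by the shuffle mask, so the math that
  ; produces elements 0..3 becomes dead once that half is replaced with undef.
  %b = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i32> %c
}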
lib/CodeGen/SelectionDAG/DAGCombiner.cpp

@@ -17,6 +17,7 @@
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
@@ -10708,6 +10709,92 @@ SDValue DAGCombiner::visitEXTRACT_SUBVECTOR(SDNode* N) {
  return SDValue();
}

// Walk a chain of CONCAT_VECTORS / INSERT_SUBVECTOR nodes feeding a shuffle
// operand and replace any piece whose elements are never read (per
// UsedElements) with undef, so that the math producing it becomes dead.
static SDValue simplifyShuffleOperandRecursively(SmallBitVector &UsedElements,
                                                 SDValue V, SelectionDAG &DAG) {
  SDLoc DL(V);
  EVT VT = V.getValueType();

  switch (V.getOpcode()) {
  default:
    return V;

  case ISD::CONCAT_VECTORS: {
    EVT OpVT = V->getOperand(0).getValueType();
    int OpSize = OpVT.getVectorNumElements();
    SmallBitVector OpUsedElements(OpSize, false);
    bool FoundSimplification = false;
    SmallVector<SDValue, 4> NewOps;
    NewOps.reserve(V->getNumOperands());
    for (int i = 0, NumOps = V->getNumOperands(); i < NumOps; ++i) {
      SDValue Op = V->getOperand(i);
      // Remap the shuffle-wide element indices onto this concat operand.
      bool OpUsed = false;
      for (int j = 0; j < OpSize; ++j)
        if (UsedElements[i * OpSize + j]) {
          OpUsedElements[j] = true;
          OpUsed = true;
        }
      NewOps.push_back(
          OpUsed ? simplifyShuffleOperandRecursively(OpUsedElements, Op, DAG)
                 : DAG.getUNDEF(OpVT));
      // Record whether any operand was actually simplified so the concat is
      // only rebuilt when something changed.
      FoundSimplification |= Op != NewOps.back();
      OpUsedElements.reset();
    }
    if (FoundSimplification)
      V = DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, NewOps);
    return V;
  }

  case ISD::INSERT_SUBVECTOR: {
    SDValue BaseV = V->getOperand(0);
    SDValue SubV = V->getOperand(1);
    auto *IdxN = dyn_cast<ConstantSDNode>(V->getOperand(2));
    if (!IdxN)
      return V;

    int SubSize = SubV.getValueType().getVectorNumElements();
    int Idx = IdxN->getZExtValue();
    // Move the used-element bits covered by the inserted subvector over to
    // the subvector's own set, clearing them in UsedElements so the base
    // vector sees those lanes as unused.
    bool SubVectorUsed = false;
    SmallBitVector SubUsedElements(SubSize, false);
    for (int i = 0; i < SubSize; ++i)
      if (UsedElements[i + Idx]) {
        SubVectorUsed = true;
        SubUsedElements[i] = true;
        UsedElements[i + Idx] = false;
      }

    // Now recurse on both the base and sub vectors.
    SDValue SimplifiedSubV =
        SubVectorUsed
            ? simplifyShuffleOperandRecursively(SubUsedElements, SubV, DAG)
            : DAG.getUNDEF(SubV.getValueType());
    SDValue SimplifiedBaseV =
        simplifyShuffleOperandRecursively(UsedElements, BaseV, DAG);
    if (SimplifiedSubV != SubV || SimplifiedBaseV != BaseV)
      V = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
                      SimplifiedBaseV, SimplifiedSubV, V->getOperand(2));
    return V;
  }
  }
}

// Compute which elements of each shuffle operand are actually read by the
// mask, then try to prune the unused pieces of both operands.
static SDValue simplifyShuffleOperands(ShuffleVectorSDNode *SVN, SDValue N0,
                                       SDValue N1, SelectionDAG &DAG) {
  EVT VT = SVN->getValueType(0);
  int NumElts = VT.getVectorNumElements();
  SmallBitVector N0UsedElements(NumElts, false), N1UsedElements(NumElts, false);
  for (int M : SVN->getMask())
    if (M >= 0 && M < NumElts)
      N0UsedElements[M] = true;
    else if (M >= NumElts)
      N1UsedElements[M - NumElts] = true;

  SDValue S0 = simplifyShuffleOperandRecursively(N0UsedElements, N0, DAG);
  SDValue S1 = simplifyShuffleOperandRecursively(N1UsedElements, N1, DAG);
  if (S0 == N0 && S1 == N1)
    return SDValue();

  return DAG.getVectorShuffle(VT, SDLoc(SVN), S0, S1, SVN->getMask());
}

// Tries to turn a shuffle of two CONCAT_VECTORS into a single concat.
static SDValue partitionShuffleOfConcats(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

@@ -10860,6 +10947,12 @@ SDValue DAGCombiner::visitVECTOR_SHUFFLE(SDNode *N) {
    }
  }

  // There are various patterns used to build up a vector from smaller vectors,
  // subvectors, or elements. Scan chains of these and replace unused insertions
  // or components with undef.
  if (SDValue S = simplifyShuffleOperands(SVN, N0, N1, DAG))
    return S;

  if (N0.getOpcode() == ISD::CONCAT_VECTORS &&
      Level < AfterLegalizeVectorOps &&
      (N1.getOpcode() == ISD::UNDEF ||
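For a two-operand shuffle, simplifyShuffleOperands splits the mask into per-operand used-element sets: mask values in [0, NumElts) mark elements of the first operand, and values in [NumElts, 2*NumElts) mark elements of the second after subtracting NumElts. The sketch below works this through (illustrative only, modeled on the new combine_unneeded_subvector2 test; @mask_split_sketch is not a real test name):

define <8 x i32> @mask_split_sketch(<8 x i32> %a, <8 x i32> %b) {
  %c = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  ; Mask values 7,6,5,4 mark elements 4..7 of %b as used; values 15,14,13,12
  ; mark elements 15-8..12-8 = 4..7 of %c. Each operand's low half is unread,
  ; so the half of the legalized add feeding %c's low lanes is pruned to undef.
  %d = shufflevector <8 x i32> %b, <8 x i32> %c, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 15, i32 14, i32 13, i32 12>
  ret <8 x i32> %d
}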
test/CodeGen/X86/avx-vperm2x128.ll

@@ -64,11 +64,9 @@ entry:
define <32 x i8> @Ei(<32 x i8> %a, <32 x i8> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: Ei:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpaddb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddb {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,2,3]
; AVX1-NEXT: retq
;

@@ -87,11 +85,7 @@ entry:
define <4 x i64> @E2i(<4 x i64> %a, <4 x i64> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: E2i:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1]
; AVX1-NEXT: vpaddq %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddq %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vpaddq {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm0[0,1]
; AVX1-NEXT: retq
;

@@ -111,11 +105,9 @@ entry:
define <8 x i32> @E3i(<8 x i32> %a, <8 x i32> %b) nounwind uwtable readnone ssp {
; AVX1-LABEL: E3i:
; AVX1: ## BB#0: ## %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,1,1,1]
; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: retq
;
test/CodeGen/X86/vector-shuffle-combining.ll

@@ -2512,3 +2512,67 @@ define <4 x float> @combine_undef_input_test20(<4 x float> %a) {
  %2 = shufflevector <4 x float> %a, <4 x float> %1, <4 x i32> <i32 4, i32 6, i32 2, i32 3>
  ret <4 x float> %2
}

; These tests are designed to test the ability to combine away unnecessary
; operations feeding into a shuffle. The AVX cases are the important ones as
; they leverage operations which cannot be done naturally on the entire vector
; and thus are decomposed into multiple smaller operations.

define <8 x i32> @combine_unneeded_subvector1(<8 x i32> %a) {
; SSE-LABEL: combine_unneeded_subvector1:
; SSE: # BB#0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,2,1,0]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_unneeded_subvector1:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[3,2,1,0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_unneeded_subvector1:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [7,6,5,4,7,6,5,4]
; AVX2-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-NEXT: retq
  %b = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 7, i32 6, i32 5, i32 4>
  ret <8 x i32> %c
}

define <8 x i32> @combine_unneeded_subvector2(<8 x i32> %a, <8 x i32> %b) {
; SSE-LABEL: combine_unneeded_subvector2:
; SSE: # BB#0:
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[3,2,1,0]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,2,1,0]
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_unneeded_subvector2:
; AVX1: # BB#0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm0
; AVX1-NEXT: vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[3,2,1,0]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_unneeded_subvector2:
; AVX2: # BB#0:
; AVX2-NEXT: vpaddd {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = <7,6,5,4,u,u,u,u>
; AVX2-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX2-NEXT: retq
  %c = add <8 x i32> %a, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  %d = shufflevector <8 x i32> %b, <8 x i32> %c, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 15, i32 14, i32 13, i32 12>
  ret <8 x i32> %d
}