[X86][SSE] lowerShuffleWithSHUFPS - commute '2*V1+2*V2 elements' mask if it allows a loaded fold

As mentioned on D73023.
This commit is contained in:
Simon Pilgrim 2020-01-24 11:52:47 +00:00
parent 805c157e8a
commit 30fcd29fe4
2 changed files with 15 additions and 9 deletions

View File

@@ -13316,10 +13316,11 @@ static SDValue lowerV2I64Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
/// It makes no assumptions about whether this is the *best* lowering, it simply
/// uses it.
static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
ArrayRef<int> OriginalMask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
SDValue LowV = V1, HighV = V2;
int NewMask[4] = {Mask[0], Mask[1], Mask[2], Mask[3]};
SmallVector<int, 4> Mask(OriginalMask.begin(), OriginalMask.end());
SmallVector<int, 4> NewMask = Mask;
int NumV2Elements = count_if(Mask, [](int M) { return M >= 4; });
@@ -13357,6 +13358,14 @@ static SDValue lowerShuffleWithSHUFPS(const SDLoc &DL, MVT VT,
NewMask[V2Index] = 0; // We shifted the V2 element into V2[0].
}
} else if (NumV2Elements == 2) {
// If we are likely to fold V1 but not V2, then commute the shuffle.
if (MayFoldLoad(V1) && !MayFoldLoad(V2)) {
ShuffleVectorSDNode::commuteMask(Mask);
NewMask = Mask;
std::swap(V1, V2);
std::swap(LowV, HighV);
}
if (Mask[0] < 4 && Mask[1] < 4) {
// Handle the easy case where we have V1 in the low lanes and V2 in the
// high lanes.

View File

@@ -2471,17 +2471,14 @@ define <4 x float> @shuffle_mem_v4f32_4523(<4 x float> %a, <4 x float>* %pb) {
define <4 x float> @shuffle_mem_v4f32_0624(<4 x float> %a0, <4 x float>* %a1) {
; SSE-LABEL: shuffle_mem_v4f32_0624:
; SSE: # %bb.0:
; SSE-NEXT: movaps (%rdi), %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[2,0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],mem[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; SSE-NEXT: retq
;
; AVX1OR2-LABEL: shuffle_mem_v4f32_0624:
; AVX1OR2: # %bb.0:
; AVX1OR2-NEXT: vmovaps (%rdi), %xmm1
; AVX1OR2-NEXT: vshufps {{.*#+}} xmm0 = xmm1[0,2],xmm0[2,0]
; AVX1OR2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX1OR2-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],mem[0,2]
; AVX1OR2-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,1]
; AVX1OR2-NEXT: retq
;
; AVX512VL-LABEL: shuffle_mem_v4f32_0624: