[X86] isTargetShuffleEquivalent - attempt to match SM_SentinelZero shuffle mask elements using known bits

If the combined shuffle mask requires zero elements, we don't currently have much chance of matching them against the expected source vector. This patch uses the SelectionDAG::MaskedVectorIsZero wrapper to attempt to determine if the expected element we want to use is already known to be zero.
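
In rough outline, the matching works as sketched below - a simplified standalone version (the helper name zeroLanesAreKnownZero is made up for illustration; the real logic lives inside isTargetShuffleEquivalent and reuses its existing undef/index-equivalence checks and the types already available in X86ISelLowering.cpp):

static bool zeroLanesAreKnownZero(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
                                  SDValue V1, SDValue V2,
                                  const SelectionDAG &DAG) {
  int Size = Mask.size();
  // One demanded-elements mask per source; bits are set only for lanes that
  // the combined mask requires to be zero.
  APInt ZeroV1 = APInt::getNullValue(Size);
  APInt ZeroV2 = APInt::getNullValue(Size);
  for (int i = 0; i != Size; ++i) {
    if (Mask[i] != SM_SentinelZero)
      continue; // undef and index-equivalence cases are handled by the existing checks
    int ExpectedIdx = ExpectedMask[i];
    SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
    if (!ExpectedV ||
        (int)ExpectedV.getValueType().getVectorNumElements() != Size)
      return false; // can't reason about this source - treat as a failed match
    APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
    ZeroMask.setBit(ExpectedIdx < Size ? ExpectedIdx : ExpectedIdx - Size);
  }
  // A single known-bits query per source, and only if any lanes were demanded.
  return (ZeroV1.isNullValue() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
         (ZeroV2.isNullValue() || DAG.MaskedVectorIsZero(V2, ZeroV2));
}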

I've also tightened up the ExpectedMask assertion to always be in range - we never give it a target shuffle mask that contains sentinels - allowing us to remove some of the confusing bounds checks.

This attempts to address some of the regressions uncovered by D129150, where we more aggressively fold shuffles as AND / 'clear' masks, which results in more combined shuffles using SM_SentinelZero.
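
As an illustrative scenario (element values chosen for exposition, not taken from the affected tests), consider a combined v4i32 mask being matched against the UNPCKL(V1, V2) pattern:

int CombinedMask[4] = {0, SM_SentinelZero, 1, SM_SentinelZero};
int UnpcklMask[4]   = {0, 4, 1, 5}; // the binary v4i32 UNPCKL pattern
// The two SM_SentinelZero lanes line up with elements 0 and 1 of V2, so the
// masks are now treated as equivalent whenever
// DAG.MaskedVectorIsZero(V2, APInt(4, 0b0011)) proves both elements are zero,
// e.g. because V2 is the result of an AND with a 'clear' constant.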

Differential Revision: https://reviews.llvm.org/D129207
Simon Pilgrim 2022-07-11 15:29:44 +01:00
parent 5cbe39ef88
commit 97868fb972
6 changed files with 150 additions and 133 deletions


@ -5978,6 +5978,11 @@ static bool isInRange(int Val, int Low, int Hi) {
return (Val >= Low && Val < Hi);
}
/// Return true if every val in Mask falls within the specified range (L, H].
static bool isInRange(ArrayRef<int> Mask, int Low, int Hi) {
return llvm::all_of(Mask, [Low, Hi](int M) { return isInRange(M, Low, Hi); });
}
/// Return true if the value of any element in Mask falls within the specified
/// range (L, H].
static bool isAnyInRange(ArrayRef<int> Mask, int Low, int Hi) {
@ -11785,15 +11790,16 @@ static bool isShuffleEquivalent(ArrayRef<int> Mask, ArrayRef<int> ExpectedMask,
/// value in ExpectedMask is always accepted. Otherwise the indices must match.
///
/// SM_SentinelZero is accepted as a valid negative index but must match in
/// both.
/// both, or via a known bits test.
static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
ArrayRef<int> ExpectedMask,
const SelectionDAG &DAG,
SDValue V1 = SDValue(),
SDValue V2 = SDValue()) {
int Size = Mask.size();
if (Size != (int)ExpectedMask.size())
return false;
assert(isUndefOrZeroOrInRange(ExpectedMask, 0, 2 * Size) &&
assert(isInRange(ExpectedMask, 0, 2 * Size) &&
"Illegal target shuffle mask");
// Check for out-of-range target shuffle mask indices.
@ -11806,12 +11812,28 @@ static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
if (V2 && V2.getValueSizeInBits() != VT.getSizeInBits())
V2 = SDValue();
APInt ZeroV1 = APInt::getNullValue(Size);
APInt ZeroV2 = APInt::getNullValue(Size);
for (int i = 0; i < Size; ++i) {
int MaskIdx = Mask[i];
int ExpectedIdx = ExpectedMask[i];
if (MaskIdx == SM_SentinelUndef || MaskIdx == ExpectedIdx)
continue;
if (0 <= MaskIdx && 0 <= ExpectedIdx) {
if (MaskIdx == SM_SentinelZero) {
// If we need this expected index to be a zero element, then update the
// relevant zero mask and perform the known bits at the end to minimize
// repeated computes.
SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
if (ExpectedV &&
Size == (int)ExpectedV.getValueType().getVectorNumElements()) {
int BitIdx = ExpectedIdx < Size ? ExpectedIdx : (ExpectedIdx - Size);
APInt &ZeroMask = ExpectedIdx < Size ? ZeroV1 : ZeroV2;
ZeroMask.setBit(BitIdx);
continue;
}
}
if (MaskIdx >= 0) {
SDValue MaskV = MaskIdx < Size ? V1 : V2;
SDValue ExpectedV = ExpectedIdx < Size ? V1 : V2;
MaskIdx = MaskIdx < Size ? MaskIdx : (MaskIdx - Size);
@ -11819,15 +11841,16 @@ static bool isTargetShuffleEquivalent(MVT VT, ArrayRef<int> Mask,
if (IsElementEquivalent(Size, MaskV, ExpectedV, MaskIdx, ExpectedIdx))
continue;
}
// TODO - handle SM_Sentinel equivalences.
return false;
}
return true;
return (ZeroV1.isNullValue() || DAG.MaskedVectorIsZero(V1, ZeroV1)) &&
(ZeroV2.isNullValue() || DAG.MaskedVectorIsZero(V2, ZeroV2));
}
// Check if the shuffle mask is suitable for the AVX vpunpcklwd or vpunpckhwd
// instructions.
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT,
const SelectionDAG &DAG) {
if (VT != MVT::v8i32 && VT != MVT::v8f32)
return false;
@ -11837,12 +11860,13 @@ static bool isUnpackWdShuffleMask(ArrayRef<int> Mask, MVT VT) {
SmallVector<int, 8> Unpckhwd;
createUnpackShuffleMask(MVT::v8i16, Unpckhwd, /* Lo = */ false,
/* Unary = */ false);
bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd) ||
isTargetShuffleEquivalent(VT, Mask, Unpckhwd));
bool IsUnpackwdMask = (isTargetShuffleEquivalent(VT, Mask, Unpcklwd, DAG) ||
isTargetShuffleEquivalent(VT, Mask, Unpckhwd, DAG));
return IsUnpackwdMask;
}
static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask,
const SelectionDAG &DAG) {
// Create 128-bit vector type based on mask size.
MVT EltVT = MVT::getIntegerVT(128 / Mask.size());
MVT VT = MVT::getVectorVT(EltVT, Mask.size());
@ -11855,8 +11879,8 @@ static bool is128BitUnpackShuffleMask(ArrayRef<int> Mask) {
for (unsigned i = 0; i != 4; ++i) {
SmallVector<int, 16> UnpackMask;
createUnpackShuffleMask(VT, UnpackMask, (i >> 1) % 2, i % 2);
if (isTargetShuffleEquivalent(VT, Mask, UnpackMask) ||
isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask))
if (isTargetShuffleEquivalent(VT, Mask, UnpackMask, DAG) ||
isTargetShuffleEquivalent(VT, CommutedMask, UnpackMask, DAG))
return true;
}
return false;
@ -12049,7 +12073,7 @@ static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
// Attempt to match the target mask against the unpack lo/hi mask patterns.
SmallVector<int, 64> Unpckl, Unpckh;
createUnpackShuffleMask(VT, Unpckl, /* Lo = */ true, IsUnary);
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, V1,
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG, V1,
(IsUnary ? V1 : V2))) {
UnpackOpcode = X86ISD::UNPCKL;
V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
@ -12058,7 +12082,7 @@ static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
}
createUnpackShuffleMask(VT, Unpckh, /* Lo = */ false, IsUnary);
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, V1,
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG, V1,
(IsUnary ? V1 : V2))) {
UnpackOpcode = X86ISD::UNPCKH;
V2 = (Undef2 ? DAG.getUNDEF(VT) : (IsUnary ? V1 : V2));
@ -12097,14 +12121,14 @@ static bool matchShuffleWithUNPCK(MVT VT, SDValue &V1, SDValue &V2,
// If a binary shuffle, commute and try again.
if (!IsUnary) {
ShuffleVectorSDNode::commuteMask(Unpckl);
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl)) {
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckl, DAG)) {
UnpackOpcode = X86ISD::UNPCKL;
std::swap(V1, V2);
return true;
}
ShuffleVectorSDNode::commuteMask(Unpckh);
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh)) {
if (isTargetShuffleEquivalent(VT, TargetMask, Unpckh, DAG)) {
UnpackOpcode = X86ISD::UNPCKH;
std::swap(V1, V2);
return true;
@ -12492,14 +12516,14 @@ static bool matchShuffleWithPACK(MVT VT, MVT &SrcVT, SDValue &V1, SDValue &V2,
// Try binary shuffle.
SmallVector<int, 32> BinaryMask;
createPackShuffleMask(VT, BinaryMask, false, NumStages);
if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, V1, V2))
if (isTargetShuffleEquivalent(VT, TargetMask, BinaryMask, DAG, V1, V2))
if (MatchPACK(V1, V2, PackVT))
return true;
// Try unary shuffle.
SmallVector<int, 32> UnaryMask;
createPackShuffleMask(VT, UnaryMask, true, NumStages);
if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, V1))
if (isTargetShuffleEquivalent(VT, TargetMask, UnaryMask, DAG, V1))
if (MatchPACK(V1, V1, PackVT))
return true;
}
@ -14311,7 +14335,7 @@ static SDValue lowerShuffleOfExtractsAsVperm(const SDLoc &DL, SDValue N0,
// and a simple narrow shuffle. Prefer extract+unpack(h/l)ps to vpermps
// because that avoids a constant load from memory.
if (NumElts == 4 &&
(isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask)))
(isSingleSHUFPSMask(NewMask) || is128BitUnpackShuffleMask(NewMask, DAG)))
return SDValue();
// Extend the shuffle mask with undef elements.
@ -17258,7 +17282,7 @@ static SDValue lowerShuffleWithUndefHalf(const SDLoc &DL, MVT VT, SDValue V1,
if (Subtarget.hasAVX2()) {
// extract128 + vunpckhps/vshufps, is better than vblend + vpermps.
if (EltWidth == 32 && NumLowerHalves && HalfVT.is128BitVector() &&
!is128BitUnpackShuffleMask(HalfMask) &&
!is128BitUnpackShuffleMask(HalfMask, DAG) &&
(!isSingleSHUFPSMask(HalfMask) ||
Subtarget.hasFastVariableCrossLaneShuffle()))
return SDValue();
@ -17920,7 +17944,7 @@ static SDValue lowerV8F32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// For non-AVX512 if the Mask is of 16bit elements in lane then try to split
// since after split we get a more efficient code using vpunpcklwd and
// vpunpckhwd instrs than vblend.
if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32))
if (!Subtarget.hasAVX512() && isUnpackWdShuffleMask(Mask, MVT::v8f32, DAG))
return lowerShuffleAsSplitOrBlend(DL, MVT::v8f32, V1, V2, Mask, Subtarget,
DAG);
@ -17958,7 +17982,7 @@ static SDValue lowerV8I32Shuffle(const SDLoc &DL, ArrayRef<int> Mask,
// For non-AVX512 if the Mask is of 16bit elements in lane then try to split
// since after split we get a more efficient code than vblend by using
// vpunpcklwd and vpunpckhwd instrs.
if (isUnpackWdShuffleMask(Mask, MVT::v8i32) && !V2.isUndef() &&
if (isUnpackWdShuffleMask(Mask, MVT::v8i32, DAG) && !V2.isUndef() &&
!Subtarget.hasAVX512())
return lowerShuffleAsSplitOrBlend(DL, MVT::v8i32, V1, V2, Mask, Subtarget,
DAG);
@ -37020,8 +37044,9 @@ static SDValue narrowLoadToVZLoad(LoadSDNode *LN, MVT MemVT, MVT VT,
// TODO: Investigate sharing more of this with shuffle lowering.
static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
bool AllowFloatDomain, bool AllowIntDomain,
SDValue V1, const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &SrcVT, MVT &DstVT) {
SDValue V1, const SelectionDAG &DAG,
const X86Subtarget &Subtarget, unsigned &Shuffle,
MVT &SrcVT, MVT &DstVT) {
unsigned NumMaskElts = Mask.size();
unsigned MaskEltSize = MaskVT.getScalarSizeInBits();
@ -37092,17 +37117,17 @@ static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
// instructions are no slower than UNPCKLPD but has the option to
// fold the input operand into even an unaligned memory load.
if (MaskVT.is128BitVector() && Subtarget.hasSSE3() && AllowFloatDomain) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG, V1)) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v2f64;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3}, DAG, V1)) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v4f32;
return true;
@ -37111,17 +37136,19 @@ static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
if (MaskVT.is256BitVector() && AllowFloatDomain) {
assert(Subtarget.hasAVX() && "AVX required for 256-bit vector shuffles");
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2}, DAG, V1)) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v4f64;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
V1)) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v8f32;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1, 3, 3, 5, 5, 7, 7}, DAG,
V1)) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v8f32;
return true;
@ -37131,21 +37158,22 @@ static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
if (MaskVT.is512BitVector() && AllowFloatDomain) {
assert(Subtarget.hasAVX512() &&
"AVX512 required for 512-bit vector shuffles");
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, V1)) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0, 2, 2, 4, 4, 6, 6}, DAG,
V1)) {
Shuffle = X86ISD::MOVDDUP;
SrcVT = DstVT = MVT::v8f64;
return true;
}
if (isTargetShuffleEquivalent(
MaskVT, Mask,
{0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, V1)) {
{0, 0, 2, 2, 4, 4, 6, 6, 8, 8, 10, 10, 12, 12, 14, 14}, DAG, V1)) {
Shuffle = X86ISD::MOVSLDUP;
SrcVT = DstVT = MVT::v16f32;
return true;
}
if (isTargetShuffleEquivalent(
MaskVT, Mask,
{1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, V1)) {
{1, 1, 3, 3, 5, 5, 7, 7, 9, 9, 11, 11, 13, 13, 15, 15}, DAG, V1)) {
Shuffle = X86ISD::MOVSHDUP;
SrcVT = DstVT = MVT::v16f32;
return true;
@ -37161,6 +37189,7 @@ static bool matchUnaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
static bool matchUnaryPermuteShuffle(MVT MaskVT, ArrayRef<int> Mask,
const APInt &Zeroable,
bool AllowFloatDomain, bool AllowIntDomain,
const SelectionDAG &DAG,
const X86Subtarget &Subtarget,
unsigned &Shuffle, MVT &ShuffleVT,
unsigned &PermuteImm) {
@ -37304,33 +37333,36 @@ static bool matchBinaryShuffle(MVT MaskVT, ArrayRef<int> Mask,
unsigned EltSizeInBits = MaskVT.getScalarSizeInBits();
if (MaskVT.is128BitVector()) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}) && AllowFloatDomain) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 0}, DAG) &&
AllowFloatDomain) {
V2 = V1;
V1 = (SM_SentinelUndef == Mask[0] ? DAG.getUNDEF(MVT::v4f32) : V1);
Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKL : X86ISD::MOVLHPS;
SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}) && AllowFloatDomain) {
if (isTargetShuffleEquivalent(MaskVT, Mask, {1, 1}, DAG) &&
AllowFloatDomain) {
V2 = V1;
Shuffle = Subtarget.hasSSE2() ? X86ISD::UNPCKH : X86ISD::MOVHLPS;
SrcVT = DstVT = Subtarget.hasSSE2() ? MVT::v2f64 : MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}) &&
if (isTargetShuffleEquivalent(MaskVT, Mask, {0, 3}, DAG) &&
Subtarget.hasSSE2() && (AllowFloatDomain || !Subtarget.hasSSE41())) {
std::swap(V1, V2);
Shuffle = X86ISD::MOVSD;
SrcVT = DstVT = MVT::v2f64;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}) &&
if (isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG) &&
(AllowFloatDomain || !Subtarget.hasSSE41())) {
Shuffle = X86ISD::MOVSS;
SrcVT = DstVT = MVT::v4f32;
return true;
}
if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7}) &&
if (isTargetShuffleEquivalent(MaskVT, Mask, {8, 1, 2, 3, 4, 5, 6, 7},
DAG) &&
Subtarget.hasFP16()) {
Shuffle = X86ISD::MOVSH;
SrcVT = DstVT = MVT::v8f16;
@ -37713,7 +37745,8 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
scaleShuffleElements(Mask, NumElts, ScaledMask)) {
for (unsigned i = 0; i != NumElts; ++i)
IdentityMask.push_back(i);
if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, V1, V2))
if (isTargetShuffleEquivalent(RootVT, ScaledMask, IdentityMask, DAG, V1,
V2))
return CanonicalizeShuffleInput(RootVT, V1);
}
}
@ -37937,7 +37970,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
}
if (matchUnaryShuffle(MaskVT, Mask, AllowFloatDomain, AllowIntDomain, V1,
Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
DAG, Subtarget, Shuffle, ShuffleSrcVT, ShuffleVT) &&
(!IsMaskedShuffle ||
(NumRootElts == ShuffleVT.getVectorNumElements()))) {
if (Depth == 0 && Root.getOpcode() == Shuffle)
@ -37948,7 +37981,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
}
if (matchUnaryPermuteShuffle(MaskVT, Mask, Zeroable, AllowFloatDomain,
AllowIntDomain, Subtarget, Shuffle, ShuffleVT,
AllowIntDomain, DAG, Subtarget, Shuffle, ShuffleVT,
PermuteImm) &&
(!IsMaskedShuffle ||
(NumRootElts == ShuffleVT.getVectorNumElements()))) {
@ -37966,7 +37999,7 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
// TODO: Handle other insertions here as well?
if (!UnaryShuffle && AllowFloatDomain && RootSizeInBits == 128 &&
Subtarget.hasSSE41() &&
!isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3})) {
!isTargetShuffleEquivalent(MaskVT, Mask, {4, 1, 2, 3}, DAG)) {
if (MaskEltSizeInBits == 32) {
SDValue SrcV1 = V1, SrcV2 = V2;
if (matchShuffleAsInsertPS(SrcV1, SrcV2, PermuteImm, Zeroable, Mask,
@ -37982,12 +38015,12 @@ static SDValue combineX86ShuffleChain(ArrayRef<SDValue> Inputs, SDValue Root,
}
}
if (MaskEltSizeInBits == 64 &&
isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}) &&
isTargetShuffleEquivalent(MaskVT, Mask, {0, 2}, DAG) &&
V2.getOpcode() == ISD::SCALAR_TO_VECTOR &&
V2.getScalarValueSizeInBits() <= 32) {
if (Depth == 0 && Root.getOpcode() == X86ISD::INSERTPS)
return SDValue(); // Nothing to do!
PermuteImm = (/*DstIdx*/2 << 4) | (/*SrcIdx*/0 << 0);
PermuteImm = (/*DstIdx*/ 2 << 4) | (/*SrcIdx*/ 0 << 0);
Res = DAG.getNode(X86ISD::INSERTPS, DL, MVT::v4f32,
CanonicalizeShuffleInput(MVT::v4f32, V1),
CanonicalizeShuffleInput(MVT::v4f32, V2),


@ -11,15 +11,14 @@ define void @volatile_load_2_elts() {
; AVX-LABEL: volatile_load_2_elts:
; AVX: # %bb.0:
; AVX-NEXT: vmovaps g0(%rip), %xmm0
; AVX-NEXT: vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[0,0,3,2]
; AVX-NEXT: vxorpd %xmm2, %xmm2, %xmm2
; AVX-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
; AVX-NEXT: vmovapd %ymm0, (%rax)
; AVX-NEXT: vmovapd %ymm1, (%rax)
; AVX-NEXT: vmovddup {{.*#+}} xmm1 = xmm0[0,0]
; AVX-NEXT: vxorps %xmm2, %xmm2, %xmm2
; AVX-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7]
; AVX-NEXT: vinsertf128 $1, %xmm1, %ymm1, %ymm1
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; AVX-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
; AVX-NEXT: vmovaps %ymm0, (%rax)
; AVX-NEXT: vmovaps %ymm1, (%rax)
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
;


@ -129,76 +129,74 @@ define void @load_i8_stride6_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1, ptr
; SSE-LABEL: load_i8_stride6_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm0
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm0[0,3,2,1,4,5,6,7]
; SSE-NEXT: movdqa (%rdi), %xmm3
; SSE-NEXT: movdqa 16(%rdi), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm1[0,3,2,1,4,5,6,7]
; SSE-NEXT: packuswb %xmm9, %xmm9
; SSE-NEXT: pxor %xmm8, %xmm8
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm3, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT: pandn %xmm0, %xmm5
; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm3[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm3[2,3]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm8[8],xmm3[9],xmm8[9],xmm3[10],xmm8[10],xmm3[11],xmm8[11],xmm3[12],xmm8[12],xmm3[13],xmm8[13],xmm3[14],xmm8[14],xmm3[15],xmm8[15]
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: psrld $16, %xmm5
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm6[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
; SSE-NEXT: packuswb %xmm7, %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255]
; SSE-NEXT: pand %xmm6, %xmm7
; SSE-NEXT: pandn %xmm1, %xmm6
; SSE-NEXT: por %xmm7, %xmm6
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm2[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm2[2,3]
; SSE-NEXT: movaps %xmm5, %xmm7
; SSE-NEXT: andps %xmm4, %xmm7
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,2,3,0,4,5,6,7]
; SSE-NEXT: packuswb %xmm7, %xmm7
; SSE-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm8[0],xmm5[1],xmm8[1],xmm5[2],xmm8[2],xmm5[3],xmm8[3],xmm5[4],xmm8[4],xmm5[5],xmm8[5],xmm5[6],xmm8[6],xmm5[7],xmm8[7]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,2],xmm3[0,3]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE-NEXT: packuswb %xmm5, %xmm5
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pandn %xmm2, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pand %xmm0, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
; SSE-NEXT: packuswb %xmm6, %xmm6
; SSE-NEXT: por %xmm6, %xmm5
; SSE-NEXT: movaps %xmm7, %xmm1
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,3,0,4,5,6,7]
; SSE-NEXT: packuswb %xmm1, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1],xmm0[2],xmm8[2],xmm0[3],xmm8[3],xmm0[4],xmm8[4],xmm0[5],xmm8[5],xmm0[6],xmm8[6],xmm0[7],xmm8[7]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm0[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,0,1,2,4,5,6,7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm8[0],xmm7[1],xmm8[1],xmm7[2],xmm8[2],xmm7[3],xmm8[3],xmm7[4],xmm8[4],xmm7[5],xmm8[5],xmm7[6],xmm8[6],xmm7[7],xmm8[7]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,2],xmm3[0,3]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0,1,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
; SSE-NEXT: packuswb %xmm6, %xmm6
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm2[3,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,0,3,4,5,6,7]
; SSE-NEXT: packuswb %xmm0, %xmm0
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm4[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm4[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,0,1,2,4,5,6,7]
; SSE-NEXT: packuswb %xmm2, %xmm2
; SSE-NEXT: movd %xmm9, (%rsi)
; SSE-NEXT: movd %xmm6, (%rdx)
; SSE-NEXT: movd %xmm7, (%rcx)
; SSE-NEXT: movd %xmm5, (%r8)
; SSE-NEXT: movd %xmm1, (%r9)
; SSE-NEXT: movd %xmm0, (%rax)
; SSE-NEXT: movd %xmm5, (%rdx)
; SSE-NEXT: movd %xmm1, (%rcx)
; SSE-NEXT: movd %xmm6, (%r8)
; SSE-NEXT: movd %xmm0, (%r9)
; SSE-NEXT: movd %xmm2, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i8_stride6_vf4:


@ -126,8 +126,6 @@ define void @PR46178(ptr %0) {
; X64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: vpsllw $8, %ymm0, %ymm0
; X64-NEXT: vpsraw $8, %ymm0, %ymm0
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,1]
; X64-NEXT: vmovdqu %ymm0, (%rdi)
; X64-NEXT: vzeroupper


@ -2025,7 +2025,6 @@ define <16 x i8> @combine_and_or_shuffle(<16 x i8> %x, <16 x i8> %y) {
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: por %xmm2, %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: combine_and_or_shuffle:


@ -3083,25 +3083,15 @@ define <2 x i64> @test_128_i64_x_2_18446744065119617024_mask_ashr_1(<2 x i64> %a
;
; X64-SSE2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-SSE2: # %bb.0:
; X64-SSE2-NEXT: psrad $1, %xmm0
; X64-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; X64-SSE2-NEXT: psrad $1, %xmm0
; X64-SSE2-NEXT: retq
;
; X64-AVX1-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-AVX1: # %bb.0:
; X64-AVX1-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX1-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3],xmm1[4,5],xmm0[6,7]
; X64-AVX1-NEXT: retq
;
; X64-AVX2-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-AVX2: # %bb.0:
; X64-AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX2-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3]
; X64-AVX2-NEXT: retq
; X64-AVX-LABEL: test_128_i64_x_2_18446744065119617024_mask_ashr_1:
; X64-AVX: # %bb.0:
; X64-AVX-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; X64-AVX-NEXT: vpsrad $1, %xmm0, %xmm0
; X64-AVX-NEXT: retq
%t0 = and <2 x i64> %a0, <i64 18446744065119617024, i64 18446744065119617024>
%t1 = ashr <2 x i64> %t0, <i64 1, i64 1>
ret <2 x i64> %t1