Revert "[DAG]Introduce llvm::processShuffleMasks and use it for shuffles in DAG Type Legalizer."

This reverts commit 2f49163b33 to fix
a buildbot failure. Reported in https://lab.llvm.org/buildbot#builders/105/builds/24284
Alexey Bataev 2022-04-20 06:18:52 -07:00
parent 69dd89fdcb
commit 5f7ac15912
41 changed files with 10512 additions and 12087 deletions


@@ -398,24 +398,6 @@ void narrowShuffleMaskElts(int Scale, ArrayRef<int> Mask,
bool widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
SmallVectorImpl<int> &ScaledMask);
/// Splits and processes the shuffle mask depending on the number of input and
/// output registers. The function does 2 main things: 1) splits the
/// source/destination vectors into real registers; 2) does the mask analysis
/// to identify which real registers are permuted. Then the function processes
/// the resulting register masks using the provided action items. If no input
/// register is defined, \p NoInputAction is used. If only 1 input register is
/// used, \p SingleInputAction is used, otherwise \p ManyInputsAction is used
/// to process 2 or more input registers and masks.
/// \param Mask Original shuffle mask.
/// \param NumOfSrcRegs Number of source registers.
/// \param NumOfDestRegs Number of destination registers.
/// \param NumOfUsedRegs Number of actually used destination registers.
void processShuffleMasks(
ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction);
/// Compute a map of integer instructions to their minimum legal type
/// size.
///
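
The processShuffleMasks interface declared (and here reverted) above hands the caller one callback invocation per destination register: NoInputAction when the register uses no inputs, SingleInputAction when it uses one, and ManyInputsAction when it uses two or more. The sketch below shows one way a caller might drive it; only the processShuffleMasks signature comes from the patch, while the wrapper name, the includes, and the cost weights are assumptions made for illustration.

// Hypothetical caller of processShuffleMasks; everything except the
// processShuffleMasks signature is made up for illustration.
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/VectorUtils.h"
using namespace llvm;

static unsigned estimatePerRegisterShuffles(ArrayRef<int> Mask,
                                            unsigned NumOfSrcRegs,
                                            unsigned NumOfDestRegs) {
  unsigned Cost = 0;
  processShuffleMasks(
      Mask, NumOfSrcRegs, NumOfDestRegs, /*NumOfUsedRegs=*/NumOfDestRegs,
      /*NoInputAction=*/[]() { /* this destination register is all-undef */ },
      /*SingleInputAction=*/
      [&Cost](ArrayRef<int> /*RegMask*/, unsigned /*SrcReg*/) {
        ++Cost; // one single-source permute for this destination register
      },
      /*ManyInputsAction=*/
      [&Cost](ArrayRef<int> /*RegMask*/, unsigned /*Idx1*/, unsigned /*Idx2*/) {
        Cost += 2; // one two-source shuffle of the registers Idx1 and Idx2
      });
  return Cost;
}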


@@ -496,116 +496,6 @@ bool llvm::widenShuffleMaskElts(int Scale, ArrayRef<int> Mask,
return true;
}
void llvm::processShuffleMasks(
ArrayRef<int> Mask, unsigned NumOfSrcRegs, unsigned NumOfDestRegs,
unsigned NumOfUsedRegs, function_ref<void()> NoInputAction,
function_ref<void(ArrayRef<int>, unsigned)> SingleInputAction,
function_ref<void(ArrayRef<int>, unsigned, unsigned)> ManyInputsAction) {
SmallVector<SmallVector<SmallVector<int>>> Res(NumOfDestRegs);
// Try to perform better estimation of the permutation.
// 1. Split the source/destination vectors into real registers.
// 2. Do the mask analysis to identify which real registers are
// permuted.
int Sz = Mask.size();
unsigned SzDest = Sz / NumOfDestRegs;
unsigned SzSrc = Sz / NumOfSrcRegs;
for (unsigned I = 0; I < NumOfDestRegs; ++I) {
auto &RegMasks = Res[I];
RegMasks.assign(NumOfSrcRegs, {});
// Determine which src register each value in this dest register comes
// from.
for (unsigned K = 0; K < SzDest; ++K) {
int Idx = I * SzDest + K;
if (Idx == Sz)
break;
if (Mask[Idx] >= Sz || Mask[Idx] == UndefMaskElem)
continue;
int SrcRegIdx = Mask[Idx] / SzSrc;
// Add a cost of PermuteTwoSrc for each new source register permute,
// if we have more than one source register.
if (RegMasks[SrcRegIdx].empty())
RegMasks[SrcRegIdx].assign(SzDest, UndefMaskElem);
RegMasks[SrcRegIdx][K] = Mask[Idx] % SzSrc;
}
}
// Process split mask.
for (unsigned I = 0; I < NumOfUsedRegs; ++I) {
auto &Dest = Res[I];
int NumSrcRegs =
count_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
switch (NumSrcRegs) {
case 0:
// No input vectors were used!
NoInputAction();
break;
case 1: {
// Find the only non-empty source mask and its register index.
auto *It =
find_if(Dest, [](ArrayRef<int> Mask) { return !Mask.empty(); });
unsigned SrcReg = std::distance(Dest.begin(), It);
SingleInputAction(*It, SrcReg);
break;
}
default: {
// The first mask is a permutation of a single register. Since we have two
// or more input registers to shuffle, we merge the masks for the first two
// registers and generate a shuffle of two registers rather than reordering
// the first register and then shuffling it with the second one. Next,
// generate the shuffles of the resulting register + the remaining registers
// from the list.
auto &&CombineMasks = [](MutableArrayRef<int> FirstMask,
ArrayRef<int> SecondMask) {
for (int Idx = 0, VF = FirstMask.size(); Idx < VF; ++Idx) {
if (SecondMask[Idx] != UndefMaskElem) {
assert(FirstMask[Idx] == UndefMaskElem &&
"Expected undefined mask element.");
FirstMask[Idx] = SecondMask[Idx] + VF;
}
}
};
auto &&NormalizeMask = [](MutableArrayRef<int> Mask) {
for (int Idx = 0, VF = Mask.size(); Idx < VF; ++Idx) {
if (Mask[Idx] != UndefMaskElem)
Mask[Idx] = Idx;
}
};
int SecondIdx;
do {
int FirstIdx = -1;
SecondIdx = -1;
MutableArrayRef<int> FirstMask, SecondMask;
for (unsigned I = 0; I < NumOfDestRegs; ++I) {
SmallVectorImpl<int> &RegMask = Dest[I];
if (RegMask.empty())
continue;
if (FirstIdx == SecondIdx) {
FirstIdx = I;
FirstMask = RegMask;
continue;
}
SecondIdx = I;
SecondMask = RegMask;
CombineMasks(FirstMask, SecondMask);
ManyInputsAction(FirstMask, FirstIdx, SecondIdx);
NormalizeMask(FirstMask);
RegMask.clear();
SecondMask = FirstMask;
SecondIdx = FirstIdx;
}
if (FirstIdx != SecondIdx && SecondIdx >= 0) {
CombineMasks(SecondMask, FirstMask);
ManyInputsAction(SecondMask, SecondIdx, FirstIdx);
Dest[FirstIdx].clear();
NormalizeMask(SecondMask);
}
} while (SecondIdx >= 0);
break;
}
}
}
}
MapVector<Instruction *, uint64_t>
llvm::computeMinimumValueSizes(ArrayRef<BasicBlock *> Blocks, DemandedBits &DB,
const TargetTransformInfo *TTI) {
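
The first loop of the implementation above re-indexes every mask element into a destination register, a source register, and an element index within that source register, using plain division and modulo. The standalone program below replays that arithmetic on a made-up eight-element mask split across two source and two destination registers; it is an illustration only, not code from the patch.

// Standalone illustration of the register-splitting arithmetic above.
// The mask values and register counts are made up for the example.
#include <cstdio>

int main() {
  const int Mask[8] = {0, -1, 5, 4, 6, -1, 3, 7}; // hypothetical 8-wide mask
  const unsigned NumOfSrcRegs = 2, NumOfDestRegs = 2;
  const unsigned Sz = 8;
  const unsigned SzDest = Sz / NumOfDestRegs; // elements per dest register
  const unsigned SzSrc = Sz / NumOfSrcRegs;   // elements per src register
  for (unsigned Idx = 0; Idx < Sz; ++Idx) {
    if (Mask[Idx] < 0 || Mask[Idx] >= (int)Sz)
      continue; // undef or out-of-range elements are skipped
    unsigned DestReg = Idx / SzDest;          // which split output it lands in
    unsigned SrcReg = Mask[Idx] / SzSrc;      // which split input it reads from
    unsigned Elt = Mask[Idx] % SzSrc;         // element index within that input
    std::printf("mask[%u]: dest reg %u <- src reg %u, element %u\n", Idx,
                DestReg, SrcReg, Elt);
  }
  return 0;
}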


@@ -20413,39 +20413,18 @@ SDValue DAGCombiner::reduceBuildVecToShuffle(SDNode *N) {
int Left = 2 * In;
int Right = 2 * In + 1;
SmallVector<int, 8> Mask(NumElems, -1);
SDValue L = Shuffles[Left];
ArrayRef<int> LMask;
bool IsLeftShuffle = L.getOpcode() == ISD::VECTOR_SHUFFLE &&
L.use_empty() && L.getOperand(1).isUndef() &&
L.getOperand(0).getValueType() == L.getValueType();
if (IsLeftShuffle) {
LMask = cast<ShuffleVectorSDNode>(L.getNode())->getMask();
L = L.getOperand(0);
}
SDValue R = Shuffles[Right];
ArrayRef<int> RMask;
bool IsRightShuffle = R.getOpcode() == ISD::VECTOR_SHUFFLE &&
R.use_empty() && R.getOperand(1).isUndef() &&
R.getOperand(0).getValueType() == R.getValueType();
if (IsRightShuffle) {
RMask = cast<ShuffleVectorSDNode>(R.getNode())->getMask();
R = R.getOperand(0);
}
for (unsigned I = 0; I != NumElems; ++I) {
if (VectorMask[I] == Left) {
Mask[I] = I;
if (IsLeftShuffle)
Mask[I] = LMask[I];
VectorMask[I] = In;
} else if (VectorMask[I] == Right) {
Mask[I] = I + NumElems;
if (IsRightShuffle)
Mask[I] = RMask[I] + NumElems;
VectorMask[I] = In;
for (unsigned i = 0; i != NumElems; ++i) {
if (VectorMask[i] == Left) {
Mask[i] = i;
VectorMask[i] = In;
} else if (VectorMask[i] == Right) {
Mask[i] = i + NumElems;
VectorMask[i] = In;
}
}
Shuffles[In] = DAG.getVectorShuffle(VT, DL, L, R, Mask);
Shuffles[In] =
DAG.getVectorShuffle(VT, DL, Shuffles[Left], Shuffles[Right], Mask);
}
}
return Shuffles[0];
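
The loop being restored above rebuilds each pairwise shuffle mask directly from VectorMask, without looking through the operands' own shuffle masks: lanes coming from the left source keep their lane index, and lanes from the right source get the index offset by NumElems. Below is a small self-contained replay of that mask construction with made-up inputs; it is not code from the patch.

// Standalone replay of the restored mask construction, with made-up inputs.
#include <cstdio>

int main() {
  const unsigned NumElems = 4;
  const int Left = 0, Right = 1;
  const int VectorMask[4] = {Left, Right, Left, Right}; // hypothetical lanes
  int Mask[4] = {-1, -1, -1, -1};
  for (unsigned i = 0; i != NumElems; ++i) {
    if (VectorMask[i] == Left)
      Mask[i] = i;                // element i of the first shuffle operand
    else if (VectorMask[i] == Right)
      Mask[i] = i + NumElems;     // element i of the second shuffle operand
  }
  for (int M : Mask)
    std::printf("%d ", M);        // prints: 0 5 2 7
  std::printf("\n");
  return 0;
}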


@@ -20,9 +20,7 @@
//===----------------------------------------------------------------------===//
#include "LegalizeTypes.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/TypeSize.h"
@@ -2168,349 +2166,108 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N,
SDValue &Lo, SDValue &Hi) {
// The low and high parts of the original input give four input vectors.
SDValue Inputs[4];
SDLoc DL(N);
SDLoc dl(N);
GetSplitVector(N->getOperand(0), Inputs[0], Inputs[1]);
GetSplitVector(N->getOperand(1), Inputs[2], Inputs[3]);
EVT NewVT = Inputs[0].getValueType();
unsigned NewElts = NewVT.getVectorNumElements();
auto &&IsConstant = [](const SDValue &N) {
APInt SplatValue;
return N.getResNo() == 0 &&
(ISD::isConstantSplatVector(N.getNode(), SplatValue) ||
ISD::isBuildVectorOfConstantSDNodes(N.getNode()));
};
auto &&BuildVector = [NewElts, &DAG = DAG, NewVT, &DL](SDValue &Input1,
SDValue &Input2,
ArrayRef<int> Mask) {
assert(Input1->getOpcode() == ISD::BUILD_VECTOR &&
Input2->getOpcode() == ISD::BUILD_VECTOR &&
"Expected build vector node.");
SmallVector<SDValue> Ops(NewElts,
DAG.getUNDEF(Input1.getOperand(0).getValueType()));
for (unsigned I = 0; I < NewElts; ++I) {
if (Mask[I] == UndefMaskElem)
continue;
unsigned Idx = Mask[I];
if (Idx >= NewElts)
Ops[I] = Input2.getOperand(Idx - NewElts);
else
Ops[I] = Input1.getOperand(Idx);
}
return DAG.getBuildVector(NewVT, DL, Ops);
};
// If Lo or Hi uses elements from at most two of the four input vectors, then
// express it as a vector shuffle of those two inputs. Otherwise extract the
// input elements by hand and construct the Lo/Hi output using a BUILD_VECTOR.
SmallVector<int> OrigMask(N->getMask().begin(), N->getMask().end());
// Try to pack incoming shuffles/inputs.
auto &&TryPeekThroughShufflesInputs = [&Inputs, &NewVT, this, NewElts,
&DL](SmallVectorImpl<int> &Mask) {
// Check if all inputs are shuffles of the same operands or non-shuffles.
MapVector<std::pair<SDValue, SDValue>, SmallVector<unsigned>> ShufflesIdxs;
for (unsigned Idx = 0; Idx < array_lengthof(Inputs); ++Idx) {
SDValue Input = Inputs[Idx];
auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Input.getNode());
if (!Shuffle ||
Input.getOperand(0).getValueType() != Input.getValueType())
continue;
ShufflesIdxs[std::make_pair(Input.getOperand(0), Input.getOperand(1))]
.push_back(Idx);
ShufflesIdxs[std::make_pair(Input.getOperand(1), Input.getOperand(0))]
.push_back(Idx);
}
for (auto &P : ShufflesIdxs) {
if (P.second.size() < 2)
continue;
// Use the shuffles' operands instead of the shuffles themselves.
// 1. Adjust mask.
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
if (Inputs[SrcRegIdx].isUndef()) {
Idx = UndefMaskElem;
continue;
}
auto *Shuffle =
dyn_cast<ShuffleVectorSDNode>(Inputs[SrcRegIdx].getNode());
if (!Shuffle || !is_contained(P.second, SrcRegIdx))
continue;
int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
if (MaskElt == UndefMaskElem) {
Idx = UndefMaskElem;
continue;
}
Idx = MaskElt % NewElts +
P.second[Shuffle->getOperand(MaskElt / NewElts) == P.first.first
? 0
: 1] *
NewElts;
}
// 2. Update inputs.
Inputs[P.second[0]] = P.first.first;
Inputs[P.second[1]] = P.first.second;
// Clear the pair data.
P.second.clear();
ShufflesIdxs[std::make_pair(P.first.second, P.first.first)].clear();
}
// Check if any concat_vectors can be simplified.
SmallBitVector UsedSubVector(2 * array_lengthof(Inputs));
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
if (Inputs[SrcRegIdx].isUndef()) {
Idx = UndefMaskElem;
continue;
}
TargetLowering::LegalizeTypeAction TypeAction =
getTypeAction(Inputs[SrcRegIdx].getValueType());
if (Inputs[SrcRegIdx].getOpcode() == ISD::CONCAT_VECTORS &&
Inputs[SrcRegIdx].getNumOperands() == 2 &&
!Inputs[SrcRegIdx].getOperand(1).isUndef() &&
(TypeAction == TargetLowering::TypeLegal ||
TypeAction == TargetLowering::TypeWidenVector))
UsedSubVector.set(2 * SrcRegIdx + (Idx % NewElts) / (NewElts / 2));
}
if (UsedSubVector.count() > 1) {
SmallVector<SmallVector<std::pair<unsigned, int>, 2>> Pairs;
for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
if (UsedSubVector.test(2 * I) == UsedSubVector.test(2 * I + 1))
continue;
if (Pairs.empty() || Pairs.back().size() == 2)
Pairs.emplace_back();
if (UsedSubVector.test(2 * I)) {
Pairs.back().emplace_back(I, 0);
} else {
assert(UsedSubVector.test(2 * I + 1) &&
"Expected to be used one of the subvectors.");
Pairs.back().emplace_back(I, 1);
}
}
if (!Pairs.empty() && Pairs.front().size() > 1) {
// Adjust mask.
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
auto *It = find_if(
Pairs, [SrcRegIdx](ArrayRef<std::pair<unsigned, int>> Idxs) {
return Idxs.front().first == SrcRegIdx ||
Idxs.back().first == SrcRegIdx;
});
if (It == Pairs.end())
continue;
Idx = It->front().first * NewElts + (Idx % NewElts) % (NewElts / 2) +
(SrcRegIdx == It->front().first ? 0 : (NewElts / 2));
}
// Adjust inputs.
for (ArrayRef<std::pair<unsigned, int>> Idxs : Pairs) {
Inputs[Idxs.front().first] = DAG.getNode(
ISD::CONCAT_VECTORS, DL,
Inputs[Idxs.front().first].getValueType(),
Inputs[Idxs.front().first].getOperand(Idxs.front().second),
Inputs[Idxs.back().first].getOperand(Idxs.back().second));
}
}
}
bool Changed;
do {
// Try to remove extra shuffles (except broadcasts) and shuffles with the
// reused operands.
Changed = false;
for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
auto *Shuffle = dyn_cast<ShuffleVectorSDNode>(Inputs[I].getNode());
if (!Shuffle)
continue;
if (Shuffle->getOperand(0).getValueType() != NewVT)
continue;
int Op = -1;
if (!Inputs[I].hasOneUse() && Shuffle->getOperand(1).isUndef() &&
!Shuffle->isSplat()) {
Op = 0;
} else if (!Inputs[I].hasOneUse() &&
!Shuffle->getOperand(1).isUndef()) {
// Find the only used operand, if possible.
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
if (SrcRegIdx != I)
continue;
int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
if (MaskElt == UndefMaskElem) {
Idx = UndefMaskElem;
continue;
}
int OpIdx = MaskElt / NewElts;
if (Op == -1) {
Op = OpIdx;
continue;
}
if (Op != OpIdx) {
Op = -1;
break;
}
}
}
if (Op < 0) {
// Try to check if one of the shuffle operands is used already.
for (int OpIdx = 0; OpIdx < 2; ++OpIdx) {
if (Shuffle->getOperand(OpIdx).isUndef())
continue;
auto *It = find(Inputs, Shuffle->getOperand(OpIdx));
if (It == std::end(Inputs))
continue;
int FoundOp = std::distance(std::begin(Inputs), It);
// Found that operand is used already.
// 1. Fix the mask for the reused operand.
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
if (SrcRegIdx != I)
continue;
int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
if (MaskElt == UndefMaskElem) {
Idx = UndefMaskElem;
continue;
}
int MaskIdx = MaskElt / NewElts;
if (OpIdx == MaskIdx)
Idx = MaskElt % NewElts + FoundOp * NewElts;
}
// 2. Set Op to the unused OpIdx.
Op = (OpIdx + 1) % 2;
break;
}
}
if (Op >= 0) {
Changed = true;
Inputs[I] = Shuffle->getOperand(Op);
// Adjust mask.
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
if (SrcRegIdx != I)
continue;
int MaskElt = Shuffle->getMaskElt(Idx % NewElts);
int OpIdx = MaskElt / NewElts;
if (OpIdx != Op)
continue;
Idx = MaskElt % NewElts + SrcRegIdx * NewElts;
}
}
}
} while (Changed);
};
TryPeekThroughShufflesInputs(OrigMask);
// Process unique inputs.
auto &&MakeUniqueInputs = [&Inputs, &IsConstant,
NewElts](SmallVectorImpl<int> &Mask) {
SetVector<SDValue> UniqueInputs;
SetVector<SDValue> UniqueConstantInputs;
for (unsigned I = 0; I < array_lengthof(Inputs); ++I) {
if (IsConstant(Inputs[I]))
UniqueConstantInputs.insert(Inputs[I]);
else if (!Inputs[I].isUndef())
UniqueInputs.insert(Inputs[I]);
}
// Adjust the mask in case of reused inputs. Also, the constant inputs must
// be inserted first, otherwise they affect the final outcome.
if (UniqueInputs.size() != array_lengthof(Inputs)) {
auto &&UniqueVec = UniqueInputs.takeVector();
auto &&UniqueConstantVec = UniqueConstantInputs.takeVector();
unsigned ConstNum = UniqueConstantVec.size();
for (int &Idx : Mask) {
if (Idx == UndefMaskElem)
continue;
unsigned SrcRegIdx = Idx / NewElts;
if (Inputs[SrcRegIdx].isUndef()) {
Idx = UndefMaskElem;
continue;
}
const auto It = find(UniqueConstantVec, Inputs[SrcRegIdx]);
if (It != UniqueConstantVec.end()) {
Idx = (Idx % NewElts) +
NewElts * std::distance(UniqueConstantVec.begin(), It);
assert(Idx >= 0 && "Expected defined mask idx.");
continue;
}
const auto RegIt = find(UniqueVec, Inputs[SrcRegIdx]);
assert(RegIt != UniqueVec.end() && "Cannot find non-const value.");
Idx = (Idx % NewElts) +
NewElts * (std::distance(UniqueVec.begin(), RegIt) + ConstNum);
assert(Idx >= 0 && "Expected defined mask idx.");
}
copy(UniqueConstantVec, std::begin(Inputs));
copy(UniqueVec, std::next(std::begin(Inputs), ConstNum));
}
};
MakeUniqueInputs(OrigMask);
SDValue OrigInputs[4];
copy(Inputs, std::begin(OrigInputs));
SmallVector<int, 16> Ops;
for (unsigned High = 0; High < 2; ++High) {
SDValue &Output = High ? Hi : Lo;
// Build a shuffle mask for the output, discovering on the fly which
// input vectors to use as shuffle operands.
// input vectors to use as shuffle operands (recorded in InputUsed).
// If building a suitable shuffle vector proves too hard, then bail
// out with useBuildVector set.
unsigned InputUsed[2] = { -1U, -1U }; // Not yet discovered.
unsigned FirstMaskIdx = High * NewElts;
SmallVector<int> Mask(NewElts * array_lengthof(Inputs), UndefMaskElem);
copy(makeArrayRef(OrigMask).slice(FirstMaskIdx, NewElts), Mask.begin());
assert(!Output && "Expected default initialized initial value.");
TryPeekThroughShufflesInputs(Mask);
MakeUniqueInputs(Mask);
SDValue TmpInputs[4];
copy(Inputs, std::begin(TmpInputs));
// Track changes in the output registers.
int UsedIdx = -1;
bool SecondIteration = false;
auto &&AccumulateResults = [&UsedIdx, &SecondIteration](unsigned Idx) {
if (UsedIdx < 0) {
UsedIdx = Idx;
return false;
bool useBuildVector = false;
for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
// The mask element. This indexes into the input.
int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
// The input vector this mask element indexes into.
unsigned Input = (unsigned)Idx / NewElts;
if (Input >= array_lengthof(Inputs)) {
// The mask element does not index into any input vector.
Ops.push_back(-1);
continue;
}
if (UsedIdx >= 0 && static_cast<unsigned>(UsedIdx) == Idx)
SecondIteration = true;
return SecondIteration;
};
processShuffleMasks(
Mask, array_lengthof(Inputs), array_lengthof(Inputs),
/*NumOfUsedRegs=*/1,
[&Output, &DAG = DAG, NewVT]() { Output = DAG.getUNDEF(NewVT); },
[&Output, &DAG = DAG, NewVT, &DL, &Inputs,
&BuildVector](ArrayRef<int> Mask, unsigned Idx) {
if (Inputs[Idx]->getOpcode() == ISD::BUILD_VECTOR)
Output = BuildVector(Inputs[Idx], Inputs[Idx], Mask);
else
Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx],
DAG.getUNDEF(NewVT), Mask);
Inputs[Idx] = Output;
},
[&AccumulateResults, &Output, &DAG = DAG, NewVT, &DL, &Inputs,
&TmpInputs,
&BuildVector](ArrayRef<int> Mask, unsigned Idx1, unsigned Idx2) {
if (AccumulateResults(Idx1)) {
if (Inputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR &&
Inputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR)
Output = BuildVector(Inputs[Idx1], Inputs[Idx2], Mask);
else
Output = DAG.getVectorShuffle(NewVT, DL, Inputs[Idx1],
Inputs[Idx2], Mask);
} else {
if (TmpInputs[Idx1]->getOpcode() == ISD::BUILD_VECTOR &&
TmpInputs[Idx2]->getOpcode() == ISD::BUILD_VECTOR)
Output = BuildVector(TmpInputs[Idx1], TmpInputs[Idx2], Mask);
else
Output = DAG.getVectorShuffle(NewVT, DL, TmpInputs[Idx1],
TmpInputs[Idx2], Mask);
}
Inputs[Idx1] = Output;
});
copy(OrigInputs, std::begin(Inputs));
// Turn the index into an offset from the start of the input vector.
Idx -= Input * NewElts;
// Find or create a shuffle vector operand to hold this input.
unsigned OpNo;
for (OpNo = 0; OpNo < array_lengthof(InputUsed); ++OpNo) {
if (InputUsed[OpNo] == Input) {
// This input vector is already an operand.
break;
} else if (InputUsed[OpNo] == -1U) {
// Create a new operand for this input vector.
InputUsed[OpNo] = Input;
break;
}
}
if (OpNo >= array_lengthof(InputUsed)) {
// More than two input vectors used! Give up on trying to create a
// shuffle vector. Insert all elements into a BUILD_VECTOR instead.
useBuildVector = true;
break;
}
// Add the mask index for the new shuffle vector.
Ops.push_back(Idx + OpNo * NewElts);
}
if (useBuildVector) {
EVT EltVT = NewVT.getVectorElementType();
SmallVector<SDValue, 16> SVOps;
// Extract the input elements by hand.
for (unsigned MaskOffset = 0; MaskOffset < NewElts; ++MaskOffset) {
// The mask element. This indexes into the input.
int Idx = N->getMaskElt(FirstMaskIdx + MaskOffset);
// The input vector this mask element indexes into.
unsigned Input = (unsigned)Idx / NewElts;
if (Input >= array_lengthof(Inputs)) {
// The mask element is "undef" or indexes off the end of the input.
SVOps.push_back(DAG.getUNDEF(EltVT));
continue;
}
// Turn the index into an offset from the start of the input vector.
Idx -= Input * NewElts;
// Extract the vector element by hand.
SVOps.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, EltVT,
Inputs[Input],
DAG.getVectorIdxConstant(Idx, dl)));
}
// Construct the Lo/Hi output using a BUILD_VECTOR.
Output = DAG.getBuildVector(NewVT, dl, SVOps);
} else if (InputUsed[0] == -1U) {
// No input vectors were used! The result is undefined.
Output = DAG.getUNDEF(NewVT);
} else {
SDValue Op0 = Inputs[InputUsed[0]];
// If only one input was used, use an undefined vector for the other.
SDValue Op1 = InputUsed[1] == -1U ?
DAG.getUNDEF(NewVT) : Inputs[InputUsed[1]];
// At least one input vector was used. Create a new shuffle vector.
Output = DAG.getVectorShuffle(NewVT, dl, Op0, Op1, Ops);
}
Ops.clear();
}
}
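
The code restored above lowers each half of the split shuffle with at most two operands, discovered on the fly in InputUsed, and falls back to extracting elements into a BUILD_VECTOR as soon as a third distinct input shows up. The snippet below replays that discovery logic on a hypothetical half-mask; the values are made up and the code is not part of the patch.

// Standalone replay of the operand-discovery logic, with a made-up half-mask.
#include <cstdio>

int main() {
  const unsigned NewElts = 4;            // elements per split register
  const unsigned NumInputs = 4;          // the four split input vectors
  const int HalfMask[4] = {1, 6, -1, 9}; // hypothetical mask for the Lo half
  unsigned InputUsed[2] = {~0u, ~0u};    // operands not yet discovered
  int Ops[4] = {-1, -1, -1, -1};
  bool UseBuildVector = false;
  for (unsigned M = 0; M < NewElts; ++M) {
    int Idx = HalfMask[M];
    unsigned Input = (unsigned)Idx / NewElts; // which split input this reads
    if (Input >= NumInputs) {
      Ops[M] = -1;                       // undef lane
      continue;
    }
    Idx -= Input * NewElts;              // offset within that input vector
    unsigned OpNo = 0;
    for (; OpNo < 2; ++OpNo) {
      if (InputUsed[OpNo] == Input)
        break;                           // this input is already an operand
      if (InputUsed[OpNo] == ~0u) {
        InputUsed[OpNo] = Input;         // claim a free operand slot
        break;
      }
    }
    if (OpNo == 2) {
      UseBuildVector = true;             // third distinct input: give up
      break;
    }
    Ops[M] = Idx + OpNo * NewElts;
  }
  std::printf("useBuildVector=%d ops: %d %d %d %d\n", UseBuildVector, Ops[0],
              Ops[1], Ops[2], Ops[3]);
  return 0;
}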


@@ -104,104 +104,104 @@ define i32 @large(i8* nocapture noundef readonly %p1, i32 noundef %st1, i8* noca
; CHECK-NEXT: rev64 v5.4s, v2.4s
; CHECK-NEXT: add v16.4s, v0.4s, v7.4s
; CHECK-NEXT: add v17.4s, v3.4s, v6.4s
; CHECK-NEXT: sub v3.4s, v3.4s, v6.4s
; CHECK-NEXT: uzp2 v6.4s, v17.4s, v16.4s
; CHECK-NEXT: add v19.4s, v2.4s, v5.4s
; CHECK-NEXT: add v20.4s, v1.4s, v4.4s
; CHECK-NEXT: sub v0.4s, v0.4s, v7.4s
; CHECK-NEXT: trn2 v18.4s, v17.4s, v16.4s
; CHECK-NEXT: sub v3.4s, v3.4s, v6.4s
; CHECK-NEXT: uzp2 v7.4s, v17.4s, v16.4s
; CHECK-NEXT: zip2 v18.4s, v0.4s, v3.4s
; CHECK-NEXT: zip1 v0.4s, v0.4s, v3.4s
; CHECK-NEXT: uzp2 v3.4s, v16.4s, v17.4s
; CHECK-NEXT: add v20.4s, v2.4s, v5.4s
; CHECK-NEXT: add v21.4s, v1.4s, v4.4s
; CHECK-NEXT: sub v2.4s, v2.4s, v5.4s
; CHECK-NEXT: sub v1.4s, v1.4s, v4.4s
; CHECK-NEXT: uzp2 v4.4s, v6.4s, v17.4s
; CHECK-NEXT: zip1 v5.4s, v20.4s, v19.4s
; CHECK-NEXT: zip2 v6.4s, v20.4s, v19.4s
; CHECK-NEXT: zip2 v7.4s, v0.4s, v3.4s
; CHECK-NEXT: mov v0.s[1], v3.s[0]
; CHECK-NEXT: ext v3.16b, v17.16b, v17.16b, #12
; CHECK-NEXT: zip1 v19.4s, v1.4s, v2.4s
; CHECK-NEXT: mov v4.d[1], v6.d[1]
; CHECK-NEXT: mov v18.d[1], v5.d[1]
; CHECK-NEXT: ext v3.16b, v16.16b, v3.16b, #12
; CHECK-NEXT: mov v17.s[0], v16.s[1]
; CHECK-NEXT: ext v16.16b, v1.16b, v19.16b, #8
; CHECK-NEXT: trn2 v6.4s, v16.4s, v17.4s
; CHECK-NEXT: trn2 v19.4s, v17.4s, v16.4s
; CHECK-NEXT: zip1 v4.4s, v21.4s, v20.4s
; CHECK-NEXT: uzp2 v5.4s, v7.4s, v17.4s
; CHECK-NEXT: zip2 v7.4s, v21.4s, v20.4s
; CHECK-NEXT: zip1 v17.4s, v1.4s, v2.4s
; CHECK-NEXT: uzp2 v3.4s, v3.4s, v16.4s
; CHECK-NEXT: mov v6.d[1], v4.d[1]
; CHECK-NEXT: mov v5.d[1], v7.d[1]
; CHECK-NEXT: ext v16.16b, v1.16b, v17.16b, #8
; CHECK-NEXT: mov v3.d[1], v7.d[1]
; CHECK-NEXT: mov v19.d[1], v4.d[1]
; CHECK-NEXT: mov v1.s[3], v2.s[2]
; CHECK-NEXT: add v2.4s, v4.4s, v18.4s
; CHECK-NEXT: mov v3.d[1], v6.d[1]
; CHECK-NEXT: mov v17.d[1], v5.d[1]
; CHECK-NEXT: mov v0.d[1], v16.d[1]
; CHECK-NEXT: sub v2.4s, v6.4s, v3.4s
; CHECK-NEXT: add v3.4s, v5.4s, v19.4s
; CHECK-NEXT: mov v18.d[1], v1.d[1]
; CHECK-NEXT: rev64 v5.4s, v3.4s
; CHECK-NEXT: rev64 v4.4s, v2.4s
; CHECK-NEXT: mov v7.d[1], v1.d[1]
; CHECK-NEXT: sub v3.4s, v17.4s, v3.4s
; CHECK-NEXT: add v5.4s, v2.4s, v4.4s
; CHECK-NEXT: sub v7.4s, v0.4s, v18.4s
; CHECK-NEXT: add v0.4s, v18.4s, v0.4s
; CHECK-NEXT: add v6.4s, v3.4s, v5.4s
; CHECK-NEXT: rev64 v16.4s, v7.4s
; CHECK-NEXT: rev64 v17.4s, v0.4s
; CHECK-NEXT: sub v3.4s, v3.4s, v5.4s
; CHECK-NEXT: rev64 v5.4s, v6.4s
; CHECK-NEXT: add v1.4s, v2.4s, v4.4s
; CHECK-NEXT: add v18.4s, v7.4s, v16.4s
; CHECK-NEXT: add v19.4s, v0.4s, v17.4s
; CHECK-NEXT: sub v7.4s, v7.4s, v16.4s
; CHECK-NEXT: sub v0.4s, v0.4s, v17.4s
; CHECK-NEXT: sub v2.4s, v2.4s, v4.4s
; CHECK-NEXT: sub v4.4s, v0.4s, v7.4s
; CHECK-NEXT: add v0.4s, v7.4s, v0.4s
; CHECK-NEXT: rev64 v1.4s, v3.4s
; CHECK-NEXT: rev64 v6.4s, v4.4s
; CHECK-NEXT: rev64 v7.4s, v0.4s
; CHECK-NEXT: rev64 v16.4s, v5.4s
; CHECK-NEXT: add v17.4s, v3.4s, v1.4s
; CHECK-NEXT: add v18.4s, v4.4s, v6.4s
; CHECK-NEXT: add v19.4s, v0.4s, v7.4s
; CHECK-NEXT: sub v4.4s, v4.4s, v6.4s
; CHECK-NEXT: sub v0.4s, v0.4s, v7.4s
; CHECK-NEXT: sub v1.4s, v3.4s, v1.4s
; CHECK-NEXT: trn2 v3.4s, v16.4s, v2.4s
; CHECK-NEXT: ext v6.16b, v17.16b, v1.16b, #4
; CHECK-NEXT: ext v7.16b, v19.16b, v0.16b, #4
; CHECK-NEXT: ext v16.16b, v18.16b, v4.16b, #4
; CHECK-NEXT: ext v5.16b, v5.16b, v5.16b, #4
; CHECK-NEXT: rev64 v6.4s, v6.4s
; CHECK-NEXT: rev64 v7.4s, v7.4s
; CHECK-NEXT: trn2 v4.4s, v5.4s, v3.4s
; CHECK-NEXT: ext v5.16b, v2.16b, v1.16b, #12
; CHECK-NEXT: ext v16.16b, v0.16b, v19.16b, #12
; CHECK-NEXT: ext v17.16b, v7.16b, v18.16b, #12
; CHECK-NEXT: ext v6.16b, v6.16b, v6.16b, #4
; CHECK-NEXT: rev64 v5.4s, v5.4s
; CHECK-NEXT: rev64 v16.4s, v16.4s
; CHECK-NEXT: mov v17.s[3], v1.s[3]
; CHECK-NEXT: rev64 v17.4s, v17.4s
; CHECK-NEXT: mov v1.s[3], v2.s[3]
; CHECK-NEXT: mov v19.s[3], v0.s[3]
; CHECK-NEXT: mov v18.s[3], v4.s[3]
; CHECK-NEXT: ext v7.16b, v0.16b, v7.16b, #12
; CHECK-NEXT: ext v16.16b, v4.16b, v16.16b, #12
; CHECK-NEXT: ext v6.16b, v1.16b, v6.16b, #12
; CHECK-NEXT: trn2 v2.4s, v2.4s, v5.4s
; CHECK-NEXT: sub v20.4s, v19.4s, v7.4s
; CHECK-NEXT: sub v21.4s, v18.4s, v16.4s
; CHECK-NEXT: sub v5.4s, v17.4s, v6.4s
; CHECK-NEXT: mov v18.s[0], v4.s[0]
; CHECK-NEXT: mov v18.s[3], v7.s[3]
; CHECK-NEXT: ext v16.16b, v16.16b, v0.16b, #4
; CHECK-NEXT: ext v17.16b, v17.16b, v7.16b, #4
; CHECK-NEXT: ext v5.16b, v5.16b, v2.16b, #4
; CHECK-NEXT: trn2 v3.4s, v3.4s, v6.4s
; CHECK-NEXT: sub v20.4s, v19.4s, v16.4s
; CHECK-NEXT: sub v21.4s, v18.4s, v17.4s
; CHECK-NEXT: sub v6.4s, v1.4s, v5.4s
; CHECK-NEXT: mov v18.s[0], v7.s[0]
; CHECK-NEXT: mov v19.s[0], v0.s[0]
; CHECK-NEXT: ext v0.16b, v2.16b, v2.16b, #4
; CHECK-NEXT: mov v17.s[0], v1.s[0]
; CHECK-NEXT: add v1.4s, v18.4s, v16.4s
; CHECK-NEXT: add v2.4s, v19.4s, v7.4s
; CHECK-NEXT: add v4.4s, v3.4s, v0.4s
; CHECK-NEXT: sub v0.4s, v3.4s, v0.4s
; CHECK-NEXT: add v3.4s, v17.4s, v6.4s
; CHECK-NEXT: mov v4.d[1], v0.d[1]
; CHECK-NEXT: mov v3.d[1], v5.d[1]
; CHECK-NEXT: mov v1.d[1], v21.d[1]
; CHECK-NEXT: mov v2.d[1], v20.d[1]
; CHECK-NEXT: ext v0.16b, v3.16b, v3.16b, #4
; CHECK-NEXT: mov v1.s[0], v2.s[0]
; CHECK-NEXT: add v2.4s, v18.4s, v17.4s
; CHECK-NEXT: add v3.4s, v19.4s, v16.4s
; CHECK-NEXT: add v7.4s, v4.4s, v0.4s
; CHECK-NEXT: sub v0.4s, v4.4s, v0.4s
; CHECK-NEXT: add v1.4s, v1.4s, v5.4s
; CHECK-NEXT: mov v7.d[1], v0.d[1]
; CHECK-NEXT: mov v1.d[1], v6.d[1]
; CHECK-NEXT: mov v2.d[1], v21.d[1]
; CHECK-NEXT: mov v3.d[1], v20.d[1]
; CHECK-NEXT: movi v0.8h, #1
; CHECK-NEXT: movi v17.2d, #0x00ffff0000ffff
; CHECK-NEXT: ushr v5.4s, v1.4s, #15
; CHECK-NEXT: ushr v6.4s, v4.4s, #15
; CHECK-NEXT: ushr v7.4s, v2.4s, #15
; CHECK-NEXT: ushr v16.4s, v3.4s, #15
; CHECK-NEXT: and v6.16b, v6.16b, v0.16b
; CHECK-NEXT: ushr v4.4s, v2.4s, #15
; CHECK-NEXT: ushr v5.4s, v7.4s, #15
; CHECK-NEXT: ushr v6.4s, v3.4s, #15
; CHECK-NEXT: ushr v16.4s, v1.4s, #15
; CHECK-NEXT: and v5.16b, v5.16b, v0.16b
; CHECK-NEXT: and v16.16b, v16.16b, v0.16b
; CHECK-NEXT: and v7.16b, v7.16b, v0.16b
; CHECK-NEXT: and v0.16b, v5.16b, v0.16b
; CHECK-NEXT: mul v5.4s, v6.4s, v17.4s
; CHECK-NEXT: mul v6.4s, v16.4s, v17.4s
; CHECK-NEXT: and v6.16b, v6.16b, v0.16b
; CHECK-NEXT: and v0.16b, v4.16b, v0.16b
; CHECK-NEXT: mul v4.4s, v5.4s, v17.4s
; CHECK-NEXT: mul v5.4s, v16.4s, v17.4s
; CHECK-NEXT: mul v0.4s, v0.4s, v17.4s
; CHECK-NEXT: mul v7.4s, v7.4s, v17.4s
; CHECK-NEXT: add v4.4s, v5.4s, v4.4s
; CHECK-NEXT: mul v6.4s, v6.4s, v17.4s
; CHECK-NEXT: add v7.4s, v4.4s, v7.4s
; CHECK-NEXT: add v1.4s, v5.4s, v1.4s
; CHECK-NEXT: add v2.4s, v0.4s, v2.4s
; CHECK-NEXT: add v3.4s, v6.4s, v3.4s
; CHECK-NEXT: add v1.4s, v0.4s, v1.4s
; CHECK-NEXT: add v2.4s, v7.4s, v2.4s
; CHECK-NEXT: eor v0.16b, v1.16b, v0.16b
; CHECK-NEXT: eor v1.16b, v2.16b, v7.16b
; CHECK-NEXT: eor v0.16b, v2.16b, v0.16b
; CHECK-NEXT: eor v2.16b, v3.16b, v6.16b
; CHECK-NEXT: eor v3.16b, v4.16b, v5.16b
; CHECK-NEXT: add v2.4s, v3.4s, v2.4s
; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-NEXT: eor v1.16b, v1.16b, v5.16b
; CHECK-NEXT: eor v3.16b, v7.16b, v4.16b
; CHECK-NEXT: add v1.4s, v3.4s, v1.4s
; CHECK-NEXT: add v0.4s, v2.4s, v0.4s
; CHECK-NEXT: add v0.4s, v1.4s, v0.4s
; CHECK-NEXT: addv s0, v0.4s
; CHECK-NEXT: fmov w8, s0
; CHECK-NEXT: lsr w9, w8, #16


@@ -519,7 +519,8 @@ define <4 x i32> @shuffle3_v4i32(<4 x i32> %a, <4 x i32> %b, <4 x i32> %c) {
; CHECK: // %bb.0:
; CHECK-NEXT: zip1 v0.4s, v0.4s, v0.4s
; CHECK-NEXT: mov v0.s[1], v1.s[0]
; CHECK-NEXT: mov v0.s[2], v2.s[0]
; CHECK-NEXT: dup v1.4s, v2.s[0]
; CHECK-NEXT: mov v0.s[2], v1.s[2]
; CHECK-NEXT: ret
%x = shufflevector <4 x i32> %a, <4 x i32> %b, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
%y = shufflevector <4 x i32> %c, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
@@ -592,53 +593,73 @@ define <8 x i8> @insert4_v8i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8>
}
; CHECK: .LCPI15_0:
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 15 // 0xf
; CHECK: .byte 27 // 0x1b
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 24 // 0x18
; CHECK: .byte 12 // 0xc
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 15 // 0xf
; CHECK: .byte 27 // 0x1b
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 24 // 0x18
; CHECK: .byte 12 // 0xc
; CHECK: .byte 4 // 0x4
; CHECK: .byte 8 // 0x8
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 14 // 0xe
; CHECK: .byte 3 // 0x3
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 4 // 0x4
; CHECK: .byte 8 // 0x8
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 14 // 0xe
; CHECK: .byte 3 // 0x3
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .LCPI15_1:
; CHECK: .byte 20 // 0x14
; CHECK: .byte 24 // 0x18
; CHECK: .byte 2 // 0x2
; CHECK: .byte 3 // 0x3
; CHECK: .byte 30 // 0x1e
; CHECK: .byte 19 // 0x13
; CHECK: .byte 6 // 0x6
; CHECK: .byte 7 // 0x7
; CHECK: .byte 20 // 0x14
; CHECK: .byte 24 // 0x18
; CHECK: .byte 10 // 0xa
; CHECK: .byte 11 // 0xb
; CHECK: .byte 30 // 0x1e
; CHECK: .byte 19 // 0x13
; CHECK: .byte 14 // 0xe
; CHECK: .byte 15 // 0xf
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 15 // 0xf
; CHECK: .byte 27 // 0x1b
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 24 // 0x18
; CHECK: .byte 12 // 0xc
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 15 // 0xf
; CHECK: .byte 27 // 0x1b
; CHECK: .byte 255 // 0xff
; CHECK: .byte 255 // 0xff
; CHECK: .byte 24 // 0x18
; CHECK: .byte 12 // 0xc
; CHECK: .LCPI15_2:
; CHECK: .byte 16 // 0x10
; CHECK: .byte 17 // 0x11
; CHECK: .byte 2 // 0x2
; CHECK: .byte 3 // 0x3
; CHECK: .byte 20 // 0x14
; CHECK: .byte 21 // 0x15
; CHECK: .byte 6 // 0x6
; CHECK: .byte 7 // 0x7
; CHECK: .byte 24 // 0x18
; CHECK: .byte 25 // 0x19
; CHECK: .byte 10 // 0xa
; CHECK: .byte 11 // 0xb
; CHECK: .byte 28 // 0x1c
; CHECK: .byte 29 // 0x1d
; CHECK: .byte 14 // 0xe
; CHECK: .byte 15 // 0xf
define <16 x i8> @insert4_v16i8(<8 x i8> %a, <16 x i8> %b, <8 x i8> %c, <16 x i8> %d) {
; CHECK-LABEL: insert4_v16i8:
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI15_0
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q31_q0
; CHECK-NEXT: // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT: adrp x9, .LCPI15_1
; CHECK-NEXT: // kill: def $d2 killed $d2 def $q2
; CHECK-NEXT: mov v4.16b, v3.16b
; CHECK-NEXT: mov v3.16b, v1.16b
; CHECK-NEXT: ldr q5, [x8, :lo12:.LCPI15_0]
; CHECK-NEXT: adrp x8, .LCPI15_1
; CHECK-NEXT: mov v0.d[1], v2.d[0]
; CHECK-NEXT: tbl v31.16b, { v3.16b, v4.16b }, v5.16b
; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI15_1]
; CHECK-NEXT: tbl v0.16b, { v31.16b, v0.16b }, v1.16b
; CHECK-NEXT: mov v4.16b, v3.16b
; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI15_0]
; CHECK-NEXT: adrp x8, .LCPI15_2
; CHECK-NEXT: ldr q5, [x9, :lo12:.LCPI15_1]
; CHECK-NEXT: mov v3.16b, v1.16b
; CHECK-NEXT: tbl v1.16b, { v0.16b }, v2.16b
; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI15_2]
; CHECK-NEXT: tbl v0.16b, { v3.16b, v4.16b }, v5.16b
; CHECK-NEXT: tbl v0.16b, { v0.16b, v1.16b }, v2.16b
; CHECK-NEXT: ret
%e1 = extractelement <8 x i8> %a, i32 4
%e2 = extractelement <8 x i8> %c, i32 0


@@ -4,19 +4,20 @@
define <16 x i32> @test_shuf1(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: test_shuf1:
; CHECK: // %bb.0:
; CHECK-NEXT: dup v3.4s, v4.s[0]
; CHECK-NEXT: ext v5.16b, v6.16b, v1.16b, #4
; CHECK-NEXT: uzp1 v16.4s, v1.4s, v0.4s
; CHECK-NEXT: ext v3.16b, v6.16b, v4.16b, #12
; CHECK-NEXT: zip2 v6.4s, v7.4s, v6.4s
; CHECK-NEXT: uzp2 v17.4s, v2.4s, v4.4s
; CHECK-NEXT: mov v3.s[0], v6.s[3]
; CHECK-NEXT: trn2 v4.4s, v1.4s, v5.4s
; CHECK-NEXT: trn2 v1.4s, v16.4s, v1.4s
; CHECK-NEXT: trn2 v16.4s, v16.4s, v1.4s
; CHECK-NEXT: ext v1.16b, v1.16b, v1.16b, #4
; CHECK-NEXT: trn2 v4.4s, v7.4s, v6.4s
; CHECK-NEXT: rev64 v5.4s, v7.4s
; CHECK-NEXT: trn1 v2.4s, v17.4s, v2.4s
; CHECK-NEXT: mov v3.s[2], v7.s[3]
; CHECK-NEXT: mov v4.s[0], v7.s[1]
; CHECK-NEXT: ext v1.16b, v0.16b, v1.16b, #12
; CHECK-NEXT: mov v2.s[3], v7.s[0]
; CHECK-NEXT: mov v3.s[3], v7.s[2]
; CHECK-NEXT: dup v6.4s, v7.s[0]
; CHECK-NEXT: mov v4.d[1], v1.d[1]
; CHECK-NEXT: mov v3.d[1], v5.d[1]
; CHECK-NEXT: ext v1.16b, v0.16b, v16.16b, #12
; CHECK-NEXT: mov v2.s[3], v6.s[3]
; CHECK-NEXT: mov v0.16b, v4.16b
; CHECK-NEXT: ret
%s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <16 x i32> <i32 29, i32 26, i32 7, i32 4, i32 3, i32 6, i32 5, i32 2, i32 9, i32 8, i32 17, i32 28, i32 27, i32 16, i32 31, i32 30>
@@ -27,9 +28,9 @@ define <4 x i32> @test_shuf2(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: test_shuf2:
; CHECK: // %bb.0:
; CHECK-NEXT: zip2 v0.4s, v7.4s, v6.4s
; CHECK-NEXT: ext v1.16b, v1.16b, v1.16b, #4
; CHECK-NEXT: trn2 v0.4s, v7.4s, v0.4s
; CHECK-NEXT: mov v0.s[2], v1.s[3]
; CHECK-NEXT: mov v0.s[3], v1.s[0]
; CHECK-NEXT: mov v0.d[1], v1.d[1]
; CHECK-NEXT: ret
%s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <4 x i32> <i32 29, i32 26, i32 7, i32 4>
ret <4 x i32> %s3
@@ -50,8 +51,9 @@ define <4 x i32> @test_shuf4(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: test_shuf4:
; CHECK: // %bb.0:
; CHECK-NEXT: uzp2 v0.4s, v2.4s, v4.4s
; CHECK-NEXT: dup v1.4s, v7.s[0]
; CHECK-NEXT: trn1 v0.4s, v0.4s, v2.4s
; CHECK-NEXT: mov v0.s[3], v7.s[0]
; CHECK-NEXT: mov v0.s[3], v1.s[3]
; CHECK-NEXT: ret
%s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <4 x i32> <i32 9, i32 8, i32 17, i32 28>
ret <4 x i32> %s3
@@ -60,9 +62,9 @@ define <4 x i32> @test_shuf4(<16 x i32> %x, <16 x i32> %y) {
define <4 x i32> @test_shuf5(<16 x i32> %x, <16 x i32> %y) {
; CHECK-LABEL: test_shuf5:
; CHECK: // %bb.0:
; CHECK-NEXT: rev64 v1.4s, v7.4s
; CHECK-NEXT: ext v0.16b, v6.16b, v4.16b, #12
; CHECK-NEXT: mov v0.s[2], v7.s[3]
; CHECK-NEXT: mov v0.s[3], v7.s[2]
; CHECK-NEXT: mov v0.d[1], v1.d[1]
; CHECK-NEXT: ret
%s3 = shufflevector <16 x i32> %x, <16 x i32> %y, <4 x i32> <i32 27, i32 16, i32 31, i32 30>
ret <4 x i32> %s3


@@ -174,13 +174,15 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
; CHECKHARD-NEXT: vmov r1, s0
; CHECKHARD-NEXT: vmovx.f16 s12, s1
; CHECKHARD-NEXT: vmov r0, s12
; CHECKHARD-NEXT: vrev32.16 d16, d3
; CHECKHARD-NEXT: vext.16 d17, d4, d5, #2
; CHECKHARD-NEXT: vext.16 d16, d4, d5, #2
; CHECKHARD-NEXT: vmovx.f16 s12, s4
; CHECKHARD-NEXT: vext.16 d16, d16, d3, #1
; CHECKHARD-NEXT: vext.16 d16, d17, d16, #2
; CHECKHARD-NEXT: vext.16 d16, d16, d17, #1
; CHECKHARD-NEXT: vext.16 d17, d16, d16, #1
; CHECKHARD-NEXT: vdup.16 q11, d3[1]
; CHECKHARD-NEXT: vrev32.16 d17, d16
; CHECKHARD-NEXT: vext.16 d16, d16, d17, #3
; CHECKHARD-NEXT: vrev32.16 d17, d3
; CHECKHARD-NEXT: vext.16 d17, d17, d3, #1
; CHECKHARD-NEXT: vext.16 d16, d16, d17, #2
; CHECKHARD-NEXT: vext.16 d17, d16, d16, #2
; CHECKHARD-NEXT: vmov.16 d16[0], r1
; CHECKHARD-NEXT: vmov.16 d16[1], r0
; CHECKHARD-NEXT: vmov r0, s3
@@ -192,38 +194,37 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
; CHECKHARD-NEXT: vmov.16 d16[3], r0
; CHECKHARD-NEXT: vmov r0, s2
; CHECKHARD-NEXT: vmov.16 d18[0], r1
; CHECKHARD-NEXT: vmov r1, s8
; CHECKHARD-NEXT: vmov.16 d18[1], r0
; CHECKHARD-NEXT: vmov r0, s12
; CHECKHARD-NEXT: vdup.16 q3, d3[1]
; CHECKHARD-NEXT: vmov r1, s12
; CHECKHARD-NEXT: vmovx.f16 s12, s9
; CHECKHARD-NEXT: vmov.16 d20[1], r1
; CHECKHARD-NEXT: vmov.16 d18[2], r0
; CHECKHARD-NEXT: vmov r0, s5
; CHECKHARD-NEXT: vmov.16 d18[3], r0
; CHECKHARD-NEXT: vmov r0, s8
; CHECKHARD-NEXT: vmov.16 d19[0], r1
; CHECKHARD-NEXT: vmov.16 d19[1], r0
; CHECKHARD-NEXT: vmov r0, s12
; CHECKHARD-NEXT: vmov.16 d19[2], r0
; CHECKHARD-NEXT: vmov.16 d20[2], r0
; CHECKHARD-NEXT: vmov r0, s11
; CHECKHARD-NEXT: vmov.16 d19[3], r0
; CHECKHARD-NEXT: vmov.16 d20[3], r0
; CHECKHARD-NEXT: vmov r0, s10
; CHECKHARD-NEXT: vext.16 d20, d20, d22, #1
; CHECKHARD-NEXT: vdup.16 q11, d3[2]
; CHECKHARD-NEXT: vext.16 d19, d20, d20, #3
; CHECKHARD-NEXT: vadd.f16 q8, q8, q9
; CHECKHARD-NEXT: vext.16 d18, d0, d1, #2
; CHECKHARD-NEXT: vmovx.f16 s0, s8
; CHECKHARD-NEXT: vmov r0, s0
; CHECKHARD-NEXT: vdup.16 q0, d3[2]
; CHECKHARD-NEXT: vext.16 d19, d18, d2, #3
; CHECKHARD-NEXT: vmov r1, s0
; CHECKHARD-NEXT: vext.16 d18, d2, d18, #1
; CHECKHARD-NEXT: vmovx.f16 s0, s11
; CHECKHARD-NEXT: vext.16 d19, d18, d2, #3
; CHECKHARD-NEXT: vext.16 d18, d2, d18, #1
; CHECKHARD-NEXT: vext.16 d18, d18, d19, #2
; CHECKHARD-NEXT: vext.16 d18, d18, d18, #1
; CHECKHARD-NEXT: vmov.16 d19[0], r1
; CHECKHARD-NEXT: vmov.16 d19[1], r0
; CHECKHARD-NEXT: vmov r0, s10
; CHECKHARD-NEXT: vmov.16 d19[2], r0
; CHECKHARD-NEXT: vmov.16 d20[1], r1
; CHECKHARD-NEXT: vmov.16 d20[2], r0
; CHECKHARD-NEXT: vmov r0, s0
; CHECKHARD-NEXT: vmov.16 d19[3], r0
; CHECKHARD-NEXT: vmov.16 d20[3], r0
; CHECKHARD-NEXT: vext.16 d20, d20, d22, #1
; CHECKHARD-NEXT: vext.16 d19, d20, d20, #3
; CHECKHARD-NEXT: vadd.f16 q0, q8, q9
; CHECKHARD-NEXT: bx lr
;
@@ -232,13 +233,15 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
; CHECKSOFT-NEXT: vmov r1, s0
; CHECKSOFT-NEXT: vmovx.f16 s12, s1
; CHECKSOFT-NEXT: vmov r0, s12
; CHECKSOFT-NEXT: vrev32.16 d16, d3
; CHECKSOFT-NEXT: vext.16 d17, d4, d5, #2
; CHECKSOFT-NEXT: vext.16 d16, d4, d5, #2
; CHECKSOFT-NEXT: vmovx.f16 s12, s4
; CHECKSOFT-NEXT: vext.16 d16, d16, d3, #1
; CHECKSOFT-NEXT: vext.16 d16, d17, d16, #2
; CHECKSOFT-NEXT: vext.16 d16, d16, d17, #1
; CHECKSOFT-NEXT: vext.16 d17, d16, d16, #1
; CHECKSOFT-NEXT: vdup.16 q11, d3[1]
; CHECKSOFT-NEXT: vrev32.16 d17, d16
; CHECKSOFT-NEXT: vext.16 d16, d16, d17, #3
; CHECKSOFT-NEXT: vrev32.16 d17, d3
; CHECKSOFT-NEXT: vext.16 d17, d17, d3, #1
; CHECKSOFT-NEXT: vext.16 d16, d16, d17, #2
; CHECKSOFT-NEXT: vext.16 d17, d16, d16, #2
; CHECKSOFT-NEXT: vmov.16 d16[0], r1
; CHECKSOFT-NEXT: vmov.16 d16[1], r0
; CHECKSOFT-NEXT: vmov r0, s3
@@ -250,38 +253,37 @@ define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
; CHECKSOFT-NEXT: vmov.16 d16[3], r0
; CHECKSOFT-NEXT: vmov r0, s2
; CHECKSOFT-NEXT: vmov.16 d18[0], r1
; CHECKSOFT-NEXT: vmov r1, s8
; CHECKSOFT-NEXT: vmov.16 d18[1], r0
; CHECKSOFT-NEXT: vmov r0, s12
; CHECKSOFT-NEXT: vdup.16 q3, d3[1]
; CHECKSOFT-NEXT: vmov r1, s12
; CHECKSOFT-NEXT: vmovx.f16 s12, s9
; CHECKSOFT-NEXT: vmov.16 d20[1], r1
; CHECKSOFT-NEXT: vmov.16 d18[2], r0
; CHECKSOFT-NEXT: vmov r0, s5
; CHECKSOFT-NEXT: vmov.16 d18[3], r0
; CHECKSOFT-NEXT: vmov r0, s8
; CHECKSOFT-NEXT: vmov.16 d19[0], r1
; CHECKSOFT-NEXT: vmov.16 d19[1], r0
; CHECKSOFT-NEXT: vmov r0, s12
; CHECKSOFT-NEXT: vmov.16 d19[2], r0
; CHECKSOFT-NEXT: vmov.16 d20[2], r0
; CHECKSOFT-NEXT: vmov r0, s11
; CHECKSOFT-NEXT: vmov.16 d19[3], r0
; CHECKSOFT-NEXT: vmov.16 d20[3], r0
; CHECKSOFT-NEXT: vmov r0, s10
; CHECKSOFT-NEXT: vext.16 d20, d20, d22, #1
; CHECKSOFT-NEXT: vdup.16 q11, d3[2]
; CHECKSOFT-NEXT: vext.16 d19, d20, d20, #3
; CHECKSOFT-NEXT: vadd.f16 q8, q8, q9
; CHECKSOFT-NEXT: vext.16 d18, d0, d1, #2
; CHECKSOFT-NEXT: vmovx.f16 s0, s8
; CHECKSOFT-NEXT: vmov r0, s0
; CHECKSOFT-NEXT: vdup.16 q0, d3[2]
; CHECKSOFT-NEXT: vext.16 d19, d18, d2, #3
; CHECKSOFT-NEXT: vmov r1, s0
; CHECKSOFT-NEXT: vext.16 d18, d2, d18, #1
; CHECKSOFT-NEXT: vmovx.f16 s0, s11
; CHECKSOFT-NEXT: vext.16 d19, d18, d2, #3
; CHECKSOFT-NEXT: vext.16 d18, d2, d18, #1
; CHECKSOFT-NEXT: vext.16 d18, d18, d19, #2
; CHECKSOFT-NEXT: vext.16 d18, d18, d18, #1
; CHECKSOFT-NEXT: vmov.16 d19[0], r1
; CHECKSOFT-NEXT: vmov.16 d19[1], r0
; CHECKSOFT-NEXT: vmov r0, s10
; CHECKSOFT-NEXT: vmov.16 d19[2], r0
; CHECKSOFT-NEXT: vmov.16 d20[1], r1
; CHECKSOFT-NEXT: vmov.16 d20[2], r0
; CHECKSOFT-NEXT: vmov r0, s0
; CHECKSOFT-NEXT: vmov.16 d19[3], r0
; CHECKSOFT-NEXT: vmov.16 d20[3], r0
; CHECKSOFT-NEXT: vext.16 d20, d20, d22, #1
; CHECKSOFT-NEXT: vext.16 d19, d20, d20, #3
; CHECKSOFT-NEXT: vadd.f16 q0, q8, q9
; CHECKSOFT-NEXT: bx lr
entry:


@@ -7,22 +7,23 @@ define <4 x float> @bar(float* %p, float* %q) {
; CHECK-NEXT: li 5, 16
; CHECK-NEXT: lxvw4x 2, 0, 3
; CHECK-NEXT: lxvw4x 3, 0, 4
; CHECK-NEXT: addis 6, 2, .LCPI0_0@toc@ha
; CHECK-NEXT: lxvw4x 0, 3, 5
; CHECK-NEXT: lxvw4x 1, 4, 5
; CHECK-NEXT: li 5, 32
; CHECK-NEXT: xvsubsp 35, 3, 2
; CHECK-NEXT: xvsubsp 34, 1, 0
; CHECK-NEXT: lxvw4x 0, 3, 5
; CHECK-NEXT: addi 3, 6, .LCPI0_0@toc@l
; CHECK-NEXT: lxvw4x 1, 4, 5
; CHECK-NEXT: addis 3, 2, .LCPI0_0@toc@ha
; CHECK-NEXT: addi 3, 3, .LCPI0_0@toc@l
; CHECK-NEXT: lxvw4x 36, 0, 3
; CHECK-NEXT: addis 3, 2, .LCPI0_1@toc@ha
; CHECK-NEXT: xvsubsp 0, 1, 0
; CHECK-NEXT: addi 3, 3, .LCPI0_1@toc@l
; CHECK-NEXT: xvsubsp 37, 1, 0
; CHECK-NEXT: vperm 2, 3, 2, 4
; CHECK-NEXT: lxvw4x 35, 0, 3
; CHECK-NEXT: vperm 2, 2, 5, 3
; CHECK-NEXT: lxvw4x 36, 0, 3
; CHECK-NEXT: xxmrghw 35, 0, 0
; CHECK-NEXT: vperm 2, 2, 3, 4
; CHECK-NEXT: blr
%1 = bitcast float* %p to <12 x float>*
%2 = bitcast float* %q to <12 x float>*


@@ -351,42 +351,54 @@ entry:
define arm_aapcs_vfpcc <8 x i16> @shuffle3step_i16(<32 x i16> %src) {
; CHECK-LABEL: shuffle3step_i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d11, d12, d13}
; CHECK-NEXT: vpush {d11, d12, d13}
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: vmovx.f16 s12, s0
; CHECK-NEXT: vmov.f32 s16, s1
; CHECK-NEXT: vins.f16 s12, s2
; CHECK-NEXT: vmovx.f16 s2, s2
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s5
; CHECK-NEXT: vmov.f32 s12, s0
; CHECK-NEXT: vmovx.f16 s14, s1
; CHECK-NEXT: vins.f16 s12, s14
; CHECK-NEXT: vmovx.f16 s14, s4
; CHECK-NEXT: vmov.f32 s13, s3
; CHECK-NEXT: vmovx.f16 s15, s7
; CHECK-NEXT: vins.f16 s13, s14
; CHECK-NEXT: vmov.f32 s14, s6
; CHECK-NEXT: vins.f16 s14, s15
; CHECK-NEXT: vmovx.f16 s15, s2
; CHECK-NEXT: vins.f16 s1, s15
; CHECK-NEXT: vmovx.f16 s15, s5
; CHECK-NEXT: vmov.f32 s17, s4
; CHECK-NEXT: vmovx.f16 s13, s3
; CHECK-NEXT: vins.f16 s17, s2
; CHECK-NEXT: vmovx.f16 s0, s0
; CHECK-NEXT: vins.f16 s17, s15
; CHECK-NEXT: vmov.f32 s16, s1
; CHECK-NEXT: vmovx.f16 s1, s10
; CHECK-NEXT: vmov.f32 s15, s9
; CHECK-NEXT: vins.f16 s15, s1
; CHECK-NEXT: vmovx.f16 s1, s11
; CHECK-NEXT: vins.f16 s10, s1
; CHECK-NEXT: vmovx.f16 s1, s3
; CHECK-NEXT: vmov.u16 r0, q1[5]
; CHECK-NEXT: vmov.f32 s18, s7
; CHECK-NEXT: vmovx.f16 s2, s8
; CHECK-NEXT: vmov.f32 s19, s10
; CHECK-NEXT: vins.f16 s18, s2
; CHECK-NEXT: vmovx.f16 s2, s11
; CHECK-NEXT: vins.f16 s19, s2
; CHECK-NEXT: vmovx.f16 s2, s1
; CHECK-NEXT: vmovx.f16 s7, s9
; CHECK-NEXT: vmov.f32 s23, s10
; CHECK-NEXT: vmov.f32 s22, s8
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vmovx.f16 s2, s4
; CHECK-NEXT: vins.f16 s3, s2
; CHECK-NEXT: vmovx.f16 s2, s7
; CHECK-NEXT: vmovx.f16 s4, s10
; CHECK-NEXT: vmovx.f16 s14, s6
; CHECK-NEXT: vmovx.f16 s15, s9
; CHECK-NEXT: vins.f16 s6, s2
; CHECK-NEXT: vins.f16 s9, s4
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s14, s8
; CHECK-NEXT: vins.f16 s15, s11
; CHECK-NEXT: vins.f16 s13, s5
; CHECK-NEXT: vmov.f32 s2, s6
; CHECK-NEXT: vmov.f32 s3, s9
; CHECK-NEXT: vadd.i16 q0, q0, q3
; CHECK-NEXT: vins.f16 s1, s5
; CHECK-NEXT: vmov q6, q5
; CHECK-NEXT: vins.f16 s6, s8
; CHECK-NEXT: vins.f16 s7, s11
; CHECK-NEXT: vmovnb.i32 q6, q4
; CHECK-NEXT: vmov.f32 s19, s10
; CHECK-NEXT: vmov.16 q0[4], r0
; CHECK-NEXT: vmov q2, q1
; CHECK-NEXT: vmovnb.i32 q2, q0
; CHECK-NEXT: vmov.f32 s3, s7
; CHECK-NEXT: vmov.f32 s2, s10
; CHECK-NEXT: vmov.f32 s18, s26
; CHECK-NEXT: vadd.i16 q0, q3, q0
; CHECK-NEXT: vadd.i16 q0, q0, q4
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: vpop {d11, d12, d13}
; CHECK-NEXT: bx lr
entry:
%s1 = shufflevector <32 x i16> %src, <32 x i16> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>
@@ -691,8 +703,8 @@ entry:
define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
; CHECK-LABEL: shuffle3step_i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vmov.u8 r0, q0[1]
; CHECK-NEXT: vmov.8 q3[0], r0
; CHECK-NEXT: vmov.u8 r0, q0[4]
@@ -707,14 +719,14 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
; CHECK-NEXT: vmov.8 q3[5], r0
; CHECK-NEXT: vmov.u8 r0, q1[3]
; CHECK-NEXT: vmov.8 q3[6], r0
; CHECK-NEXT: vmov.u8 r0, q1[6]
; CHECK-NEXT: vmov.8 q3[7], r0
; CHECK-NEXT: vmov.u8 r0, q1[9]
; CHECK-NEXT: vmov.8 q4[8], r0
; CHECK-NEXT: vmov.8 q3[8], r0
; CHECK-NEXT: vmov.u8 r0, q1[12]
; CHECK-NEXT: vmov.8 q4[9], r0
; CHECK-NEXT: vmov.8 q3[9], r0
; CHECK-NEXT: vmov.u8 r0, q1[15]
; CHECK-NEXT: vmov.8 q4[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[2]
; CHECK-NEXT: vmov.8 q4[11], r0
; CHECK-NEXT: vmov.8 q3[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[5]
; CHECK-NEXT: vmov.8 q4[12], r0
; CHECK-NEXT: vmov.u8 r0, q2[8]
@@ -723,11 +735,11 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
; CHECK-NEXT: vmov.8 q4[14], r0
; CHECK-NEXT: vmov.u8 r0, q2[14]
; CHECK-NEXT: vmov.8 q4[15], r0
; CHECK-NEXT: vmov.u8 r0, q1[6]
; CHECK-NEXT: vmov.8 q3[7], r0
; CHECK-NEXT: vmov.u8 r0, q0[0]
; CHECK-NEXT: vmov.f32 s14, s18
; CHECK-NEXT: vmov q5, q3
; CHECK-NEXT: vmov.u8 r0, q2[2]
; CHECK-NEXT: vmov.f32 s15, s19
; CHECK-NEXT: vmov.8 q5[11], r0
; CHECK-NEXT: vmov.u8 r0, q0[0]
; CHECK-NEXT: vmov.8 q4[0], r0
; CHECK-NEXT: vmov.u8 r0, q0[3]
; CHECK-NEXT: vmov.8 q4[1], r0
@@ -741,27 +753,29 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
; CHECK-NEXT: vmov.8 q4[5], r0
; CHECK-NEXT: vmov.u8 r0, q1[2]
; CHECK-NEXT: vmov.8 q4[6], r0
; CHECK-NEXT: vmov.u8 r0, q1[5]
; CHECK-NEXT: vmov.8 q4[7], r0
; CHECK-NEXT: vmov.u8 r0, q1[8]
; CHECK-NEXT: vmov.8 q5[8], r0
; CHECK-NEXT: vmov.8 q4[8], r0
; CHECK-NEXT: vmov.u8 r0, q1[11]
; CHECK-NEXT: vmov.8 q5[9], r0
; CHECK-NEXT: vmov.8 q4[9], r0
; CHECK-NEXT: vmov.u8 r0, q1[14]
; CHECK-NEXT: vmov.8 q5[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[1]
; CHECK-NEXT: vmov.8 q5[11], r0
; CHECK-NEXT: vmov.8 q4[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[4]
; CHECK-NEXT: vmov.f32 s14, s22
; CHECK-NEXT: vmov.8 q5[12], r0
; CHECK-NEXT: vmov.u8 r0, q2[7]
; CHECK-NEXT: vmov q6, q4
; CHECK-NEXT: vmov.8 q5[13], r0
; CHECK-NEXT: vmov.u8 r0, q2[10]
; CHECK-NEXT: vmov.8 q5[14], r0
; CHECK-NEXT: vmov.u8 r0, q2[13]
; CHECK-NEXT: vmov.8 q5[15], r0
; CHECK-NEXT: vmov.u8 r0, q1[5]
; CHECK-NEXT: vmov.8 q4[7], r0
; CHECK-NEXT: vmov.u8 r0, q0[2]
; CHECK-NEXT: vmov.f32 s18, s22
; CHECK-NEXT: vmov.u8 r0, q2[1]
; CHECK-NEXT: vmov.8 q6[11], r0
; CHECK-NEXT: vmov.f32 s19, s23
; CHECK-NEXT: vmov.f32 s18, s26
; CHECK-NEXT: vmov.u8 r0, q0[2]
; CHECK-NEXT: vadd.i8 q3, q4, q3
; CHECK-NEXT: vmov.8 q4[0], r0
; CHECK-NEXT: vmov.u8 r0, q0[5]
@@ -776,14 +790,6 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
; CHECK-NEXT: vmov.8 q4[5], r0
; CHECK-NEXT: vmov.u8 r0, q1[4]
; CHECK-NEXT: vmov.8 q4[6], r0
; CHECK-NEXT: vmov.u8 r0, q1[10]
; CHECK-NEXT: vmov.8 q0[8], r0
; CHECK-NEXT: vmov.u8 r0, q1[13]
; CHECK-NEXT: vmov.8 q0[9], r0
; CHECK-NEXT: vmov.u8 r0, q2[0]
; CHECK-NEXT: vmov.8 q0[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[3]
; CHECK-NEXT: vmov.8 q0[11], r0
; CHECK-NEXT: vmov.u8 r0, q2[6]
; CHECK-NEXT: vmov.8 q0[12], r0
; CHECK-NEXT: vmov.u8 r0, q2[9]
@@ -792,12 +798,20 @@ define arm_aapcs_vfpcc <16 x i8> @shuffle3step_i8(<64 x i8> %src) {
; CHECK-NEXT: vmov.8 q0[14], r0
; CHECK-NEXT: vmov.u8 r0, q2[15]
; CHECK-NEXT: vmov.8 q0[15], r0
; CHECK-NEXT: vmov.u8 r0, q1[10]
; CHECK-NEXT: vmov.8 q5[8], r0
; CHECK-NEXT: vmov.u8 r0, q1[13]
; CHECK-NEXT: vmov.8 q5[9], r0
; CHECK-NEXT: vmov.u8 r0, q2[0]
; CHECK-NEXT: vmov.8 q5[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[3]
; CHECK-NEXT: vmov.8 q5[11], r0
; CHECK-NEXT: vmov.u8 r0, q1[7]
; CHECK-NEXT: vmov.8 q4[7], r0
; CHECK-NEXT: vmov.f32 s18, s2
; CHECK-NEXT: vmov.f32 s18, s22
; CHECK-NEXT: vmov.f32 s19, s3
; CHECK-NEXT: vadd.i8 q0, q3, q4
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: bx lr
entry:
%s1 = shufflevector <64 x i8> %src, <64 x i8> undef, <16 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21, i32 24, i32 27, i32 30, i32 33, i32 36, i32 39, i32 42, i32 45>
@@ -1319,42 +1333,43 @@ entry:
define arm_aapcs_vfpcc <8 x half> @shuffle3step_f16(<32 x half> %src) {
; CHECKFP-LABEL: shuffle3step_f16:
; CHECKFP: @ %bb.0: @ %entry
; CHECKFP-NEXT: .vsave {d8, d9}
; CHECKFP-NEXT: vpush {d8, d9}
; CHECKFP-NEXT: vmov.f32 s13, s4
; CHECKFP-NEXT: vmovx.f16 s4, s4
; CHECKFP-NEXT: vmovx.f16 s17, s3
; CHECKFP-NEXT: vins.f16 s3, s4
; CHECKFP-NEXT: vmovx.f16 s4, s7
; CHECKFP-NEXT: vmovx.f16 s18, s6
; CHECKFP-NEXT: vmovx.f16 s16, s0
; CHECKFP-NEXT: vins.f16 s6, s4
; CHECKFP-NEXT: vmovx.f16 s14, s2
; CHECKFP-NEXT: .vsave {d8, d9, d10}
; CHECKFP-NEXT: vpush {d8, d9, d10}
; CHECKFP-NEXT: vmov.f32 s12, s1
; CHECKFP-NEXT: vmovx.f16 s4, s10
; CHECKFP-NEXT: vmovx.f16 s19, s9
; CHECKFP-NEXT: vmovx.f16 s14, s2
; CHECKFP-NEXT: vins.f16 s12, s14
; CHECKFP-NEXT: vmov.f32 s13, s4
; CHECKFP-NEXT: vmovx.f16 s14, s5
; CHECKFP-NEXT: vins.f16 s16, s2
; CHECKFP-NEXT: vmovx.f16 s2, s11
; CHECKFP-NEXT: vmovx.f16 s15, s8
; CHECKFP-NEXT: vins.f16 s18, s8
; CHECKFP-NEXT: vmovx.f16 s8, s1
; CHECKFP-NEXT: vins.f16 s9, s4
; CHECKFP-NEXT: vins.f16 s13, s14
; CHECKFP-NEXT: vmov.f32 s14, s7
; CHECKFP-NEXT: vins.f16 s10, s2
; CHECKFP-NEXT: vmov.f32 s1, s3
; CHECKFP-NEXT: vins.f16 s19, s11
; CHECKFP-NEXT: vins.f16 s17, s5
; CHECKFP-NEXT: vins.f16 s0, s8
; CHECKFP-NEXT: vmov.f32 s2, s6
; CHECKFP-NEXT: vmov.f32 s3, s9
; CHECKFP-NEXT: vins.f16 s14, s15
; CHECKFP-NEXT: vmov.f32 s15, s10
; CHECKFP-NEXT: vins.f16 s13, s14
; CHECKFP-NEXT: vmovx.f16 s14, s11
; CHECKFP-NEXT: vins.f16 s15, s14
; CHECKFP-NEXT: vmov.f32 s14, s7
; CHECKFP-NEXT: vmovx.f16 s16, s8
; CHECKFP-NEXT: vmovx.f16 s4, s4
; CHECKFP-NEXT: vmovx.f16 s7, s7
; CHECKFP-NEXT: vmov.f32 s20, s6
; CHECKFP-NEXT: vmovx.f16 s10, s10
; CHECKFP-NEXT: vmovx.f16 s17, s3
; CHECKFP-NEXT: vmovx.f16 s19, s9
; CHECKFP-NEXT: vmovx.f16 s18, s6
; CHECKFP-NEXT: vins.f16 s14, s16
; CHECKFP-NEXT: vmovx.f16 s16, s0
; CHECKFP-NEXT: vmovx.f16 s1, s1
; CHECKFP-NEXT: vins.f16 s20, s7
; CHECKFP-NEXT: vins.f16 s3, s4
; CHECKFP-NEXT: vins.f16 s9, s10
; CHECKFP-NEXT: vins.f16 s0, s1
; CHECKFP-NEXT: vins.f16 s16, s2
; CHECKFP-NEXT: vmov.f32 s1, s3
; CHECKFP-NEXT: vins.f16 s17, s5
; CHECKFP-NEXT: vins.f16 s19, s11
; CHECKFP-NEXT: vins.f16 s18, s8
; CHECKFP-NEXT: vmov.f32 s2, s20
; CHECKFP-NEXT: vmov.f32 s3, s9
; CHECKFP-NEXT: vadd.f16 q0, q0, q4
; CHECKFP-NEXT: vadd.f16 q0, q0, q3
; CHECKFP-NEXT: vpop {d8, d9}
; CHECKFP-NEXT: vpop {d8, d9, d10}
; CHECKFP-NEXT: bx lr
entry:
%s1 = shufflevector <32 x half> %src, <32 x half> undef, <8 x i32> <i32 0, i32 3, i32 6, i32 9, i32 12, i32 15, i32 18, i32 21>


@@ -287,46 +287,55 @@ entry:
define void @vld3_v8i16(<24 x i16> *%src, <8 x i16> *%dst) {
; CHECK-LABEL: vld3_v8i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: vldrw.u32 q0, [r0]
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: vldrw.u32 q1, [r0]
; CHECK-NEXT: vldrw.u32 q2, [r0, #16]
; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vmov.f32 s4, s1
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s9
; CHECK-NEXT: vmov.f32 s5, s8
; CHECK-NEXT: vmovx.f16 s7, s12
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmov.f32 s6, s11
; CHECK-NEXT: vins.f16 s6, s7
; CHECK-NEXT: vmovx.f16 s16, s15
; CHECK-NEXT: vmov.f32 s7, s14
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vins.f16 s7, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s1
; CHECK-NEXT: vmov.f32 s0, s5
; CHECK-NEXT: vmovx.f16 s2, s6
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vmovx.f16 s2, s8
; CHECK-NEXT: vins.f16 s3, s2
; CHECK-NEXT: vmovx.f16 s2, s11
; CHECK-NEXT: vmovx.f16 s8, s14
; CHECK-NEXT: vmovx.f16 s18, s10
; CHECK-NEXT: vmovx.f16 s19, s13
; CHECK-NEXT: vins.f16 s10, s2
; CHECK-NEXT: vins.f16 s13, s8
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s18, s12
; CHECK-NEXT: vins.f16 s19, s15
; CHECK-NEXT: vins.f16 s17, s9
; CHECK-NEXT: vmov.f32 s2, s10
; CHECK-NEXT: vmov.f32 s3, s13
; CHECK-NEXT: vadd.i16 q0, q0, q4
; CHECK-NEXT: vadd.i16 q0, q0, q1
; CHECK-NEXT: vmovx.f16 s2, s9
; CHECK-NEXT: vmov.f32 s1, s8
; CHECK-NEXT: vmovx.f16 s5, s5
; CHECK-NEXT: vins.f16 s1, s2
; CHECK-NEXT: vmov.f32 s19, s14
; CHECK-NEXT: vmovx.f16 s2, s15
; CHECK-NEXT: vmov.f32 s18, s12
; CHECK-NEXT: vins.f16 s19, s2
; CHECK-NEXT: vmov.f32 s2, s11
; CHECK-NEXT: vmov q5, q4
; CHECK-NEXT: vmov.f32 s16, s4
; CHECK-NEXT: vins.f16 s16, s5
; CHECK-NEXT: vmovx.f16 s5, s8
; CHECK-NEXT: vmov.f32 s17, s7
; CHECK-NEXT: vmovx.f16 s4, s4
; CHECK-NEXT: vins.f16 s17, s5
; CHECK-NEXT: vmovx.f16 s5, s11
; CHECK-NEXT: vmov.f32 s18, s10
; CHECK-NEXT: vmov.u16 r0, q2[5]
; CHECK-NEXT: vmovx.f16 s11, s13
; CHECK-NEXT: vins.f16 s18, s5
; CHECK-NEXT: vmovx.f16 s5, s7
; CHECK-NEXT: vmovnb.i32 q5, q0
; CHECK-NEXT: vmov.f32 s3, s19
; CHECK-NEXT: vmovx.f16 s14, s14
; CHECK-NEXT: vmov.f32 s19, s13
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vins.f16 s5, s9
; CHECK-NEXT: vins.f16 s10, s12
; CHECK-NEXT: vins.f16 s11, s15
; CHECK-NEXT: vins.f16 s19, s14
; CHECK-NEXT: vmov.16 q1[4], r0
; CHECK-NEXT: vmov q3, q2
; CHECK-NEXT: vmovnb.i32 q3, q1
; CHECK-NEXT: vmov.f32 s7, s11
; CHECK-NEXT: vmov.f32 s6, s14
; CHECK-NEXT: vmov.f32 s2, s22
; CHECK-NEXT: vadd.i16 q1, q4, q1
; CHECK-NEXT: vadd.i16 q0, q1, q0
; CHECK-NEXT: vstrw.32 q0, [r1]
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: bx lr
entry:
%l1 = load <24 x i16>, <24 x i16>* %src, align 4
@@ -342,83 +351,101 @@ entry:
define void @vld3_v16i16(<48 x i16> *%src, <16 x i16> *%dst) {
; CHECK-LABEL: vld3_v16i16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: vldrw.u32 q0, [r0, #48]
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: vldrw.u32 q1, [r0, #48]
; CHECK-NEXT: vldrw.u32 q2, [r0, #64]
; CHECK-NEXT: vldrw.u32 q3, [r0, #80]
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vmov.f32 s4, s1
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s9
; CHECK-NEXT: vmov.f32 s5, s8
; CHECK-NEXT: vmovx.f16 s7, s12
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmov.f32 s6, s11
; CHECK-NEXT: vins.f16 s6, s7
; CHECK-NEXT: vmovx.f16 s16, s15
; CHECK-NEXT: vmov.f32 s7, s14
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vins.f16 s7, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s1
; CHECK-NEXT: vldrw.u32 q4, [r0, #80]
; CHECK-NEXT: vmov.f32 s0, s4
; CHECK-NEXT: vmovx.f16 s2, s5
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vmovx.f16 s2, s8
; CHECK-NEXT: vins.f16 s3, s2
; CHECK-NEXT: vmovx.f16 s2, s11
; CHECK-NEXT: vmovx.f16 s8, s14
; CHECK-NEXT: vmovx.f16 s18, s10
; CHECK-NEXT: vmovx.f16 s19, s13
; CHECK-NEXT: vins.f16 s10, s2
; CHECK-NEXT: vins.f16 s13, s8
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s18, s12
; CHECK-NEXT: vins.f16 s19, s15
; CHECK-NEXT: vmov.f32 s3, s13
; CHECK-NEXT: vldrw.u32 q3, [r0, #16]
; CHECK-NEXT: vins.f16 s17, s9
; CHECK-NEXT: vmov.f32 s1, s7
; CHECK-NEXT: vmovx.f16 s12, s11
; CHECK-NEXT: vins.f16 s1, s2
; CHECK-NEXT: vmov.f32 s2, s10
; CHECK-NEXT: vadd.i16 q0, q0, q4
; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
; CHECK-NEXT: vmovx.f16 s14, s18
; CHECK-NEXT: vmov.f32 s3, s17
; CHECK-NEXT: vins.f16 s2, s12
; CHECK-NEXT: vmovx.f16 s12, s6
; CHECK-NEXT: vins.f16 s3, s14
; CHECK-NEXT: vmovx.f16 s14, s19
; CHECK-NEXT: vins.f16 s18, s14
; CHECK-NEXT: vins.f16 s5, s12
; CHECK-NEXT: vmovx.f16 s12, s9
; CHECK-NEXT: vmov.f32 s13, s8
; CHECK-NEXT: vmovx.f16 s4, s4
; CHECK-NEXT: vins.f16 s13, s12
; CHECK-NEXT: vmov.f32 s12, s5
; CHECK-NEXT: vmovx.f16 s5, s7
; CHECK-NEXT: vmov.u16 r2, q2[5]
; CHECK-NEXT: vmov.f32 s14, s11
; CHECK-NEXT: vmovx.f16 s11, s17
; CHECK-NEXT: vmov.f32 s23, s18
; CHECK-NEXT: vmov.f32 s22, s16
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vins.f16 s5, s9
; CHECK-NEXT: vmov q6, q5
; CHECK-NEXT: vins.f16 s10, s16
; CHECK-NEXT: vins.f16 s11, s19
; CHECK-NEXT: vmovnb.i32 q6, q3
; CHECK-NEXT: vmov.f32 s15, s18
; CHECK-NEXT: vmov.16 q1[4], r2
; CHECK-NEXT: vmov q4, q2
; CHECK-NEXT: vmovnb.i32 q4, q1
; CHECK-NEXT: vmov.f32 s7, s11
; CHECK-NEXT: vmov.f32 s6, s18
; CHECK-NEXT: vldrw.u32 q2, [r0]
; CHECK-NEXT: vadd.i16 q0, q0, q1
; CHECK-NEXT: vmovx.f16 s6, s14
; CHECK-NEXT: vldrw.u32 q4, [r0]
; CHECK-NEXT: vins.f16 s6, s8
; CHECK-NEXT: vmov.f32 s22, s15
; CHECK-NEXT: vldrw.u32 q4, [r0, #32]
; CHECK-NEXT: vmov.f32 s14, s26
; CHECK-NEXT: vmovx.f16 s6, s10
; CHECK-NEXT: vadd.i16 q0, q0, q3
; CHECK-NEXT: vldrw.u32 q3, [r0, #16]
; CHECK-NEXT: vmov.f32 s4, s9
; CHECK-NEXT: vmovx.f16 s7, s19
; CHECK-NEXT: vmov.f32 s27, s18
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s13
; CHECK-NEXT: vmov.f32 s5, s12
; CHECK-NEXT: vins.f16 s27, s7
; CHECK-NEXT: vmov.f32 s26, s16
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmov.f32 s6, s15
; CHECK-NEXT: vmov q7, q6
; CHECK-NEXT: vmov.f32 s20, s8
; CHECK-NEXT: vmovnb.i32 q7, q1
; CHECK-NEXT: vmovx.f16 s6, s9
; CHECK-NEXT: vins.f16 s20, s6
; CHECK-NEXT: vmovx.f16 s6, s12
; CHECK-NEXT: vmov.f32 s21, s11
; CHECK-NEXT: vmovx.f16 s8, s8
; CHECK-NEXT: vins.f16 s22, s8
; CHECK-NEXT: vmovx.f16 s8, s11
; CHECK-NEXT: vmov.f32 s23, s10
; CHECK-NEXT: vmovx.f16 s4, s16
; CHECK-NEXT: vins.f16 s23, s8
; CHECK-NEXT: vmovx.f16 s8, s17
; CHECK-NEXT: vins.f16 s16, s8
; CHECK-NEXT: vmovx.f16 s8, s12
; CHECK-NEXT: vmovx.f16 s5, s19
; CHECK-NEXT: vins.f16 s19, s8
; CHECK-NEXT: vmovx.f16 s8, s15
; CHECK-NEXT: vmovx.f16 s7, s9
; CHECK-NEXT: vins.f16 s14, s8
; CHECK-NEXT: vmovx.f16 s8, s10
; CHECK-NEXT: vins.f16 s4, s18
; CHECK-NEXT: vmov.f32 s20, s17
; CHECK-NEXT: vmovx.f16 s18, s18
; CHECK-NEXT: vins.f16 s9, s8
; CHECK-NEXT: vins.f16 s5, s13
; CHECK-NEXT: vins.f16 s20, s18
; CHECK-NEXT: vmov.f32 s17, s19
; CHECK-NEXT: vins.f16 s7, s11
; CHECK-NEXT: vmovx.f16 s13, s13
; CHECK-NEXT: vmov.f32 s21, s12
; CHECK-NEXT: vmov.f32 s18, s14
; CHECK-NEXT: vins.f16 s21, s13
; CHECK-NEXT: vmov.f32 s19, s9
; CHECK-NEXT: vmovx.f16 s9, s11
; CHECK-NEXT: vins.f16 s21, s6
; CHECK-NEXT: vmovx.f16 s6, s15
; CHECK-NEXT: vmov.u16 r0, q3[5]
; CHECK-NEXT: vmovx.f16 s15, s17
; CHECK-NEXT: vmov.f32 s22, s14
; CHECK-NEXT: vins.f16 s8, s10
; CHECK-NEXT: vins.f16 s9, s13
; CHECK-NEXT: vins.f16 s14, s16
; CHECK-NEXT: vins.f16 s15, s19
; CHECK-NEXT: vins.f16 s22, s6
; CHECK-NEXT: vmovx.f16 s6, s18
; CHECK-NEXT: vmov.f32 s23, s17
; CHECK-NEXT: vmov.16 q2[4], r0
; CHECK-NEXT: vmov q4, q3
; CHECK-NEXT: vins.f16 s23, s6
; CHECK-NEXT: vmovnb.i32 q4, q2
; CHECK-NEXT: vmov.f32 s11, s15
; CHECK-NEXT: vmov.f32 s10, s18
; CHECK-NEXT: vstrw.32 q0, [r1, #16]
; CHECK-NEXT: vadd.i16 q1, q4, q1
; CHECK-NEXT: vadd.i16 q1, q1, q5
; CHECK-NEXT: vmov.f32 s6, s30
; CHECK-NEXT: vadd.i16 q2, q5, q2
; CHECK-NEXT: vmov.f32 s7, s27
; CHECK-NEXT: vadd.i16 q1, q2, q1
; CHECK-NEXT: vstrw.32 q1, [r1]
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: bx lr
entry:
%l1 = load <48 x i16>, <48 x i16>* %src, align 4
@@ -584,8 +611,8 @@ entry:
define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
; CHECK-LABEL: vld3_v16i8:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9, d10, d11}
; CHECK-NEXT: vpush {d8, d9, d10, d11}
; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: vldrw.u32 q1, [r0]
; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
@@ -603,27 +630,27 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
; CHECK-NEXT: vmov.8 q3[5], r2
; CHECK-NEXT: vmov.u8 r2, q0[3]
; CHECK-NEXT: vmov.8 q3[6], r2
; CHECK-NEXT: vmov.u8 r2, q0[6]
; CHECK-NEXT: vmov.8 q3[7], r2
; CHECK-NEXT: vmov.u8 r2, q0[9]
; CHECK-NEXT: vmov.8 q4[8], r2
; CHECK-NEXT: vmov.u8 r2, q0[12]
; CHECK-NEXT: vmov.8 q4[9], r2
; CHECK-NEXT: vmov.u8 r2, q0[15]
; CHECK-NEXT: vmov.8 q4[10], r2
; CHECK-NEXT: vmov.u8 r0, q2[2]
; CHECK-NEXT: vmov.8 q4[11], r0
; CHECK-NEXT: vmov.u8 r0, q2[5]
; CHECK-NEXT: vmov.8 q3[8], r2
; CHECK-NEXT: vmov.u8 r2, q0[12]
; CHECK-NEXT: vmov.8 q4[12], r0
; CHECK-NEXT: vmov.u8 r0, q2[8]
; CHECK-NEXT: vmov.8 q3[9], r2
; CHECK-NEXT: vmov.u8 r2, q0[15]
; CHECK-NEXT: vmov.8 q4[13], r0
; CHECK-NEXT: vmov.u8 r0, q2[11]
; CHECK-NEXT: vmov.8 q3[10], r2
; CHECK-NEXT: vmov.8 q4[14], r0
; CHECK-NEXT: vmov.u8 r0, q2[14]
; CHECK-NEXT: vmov.8 q4[15], r0
; CHECK-NEXT: vmov.u8 r0, q0[6]
; CHECK-NEXT: vmov.8 q3[7], r0
; CHECK-NEXT: vmov.u8 r0, q1[0]
; CHECK-NEXT: vmov.f32 s14, s18
; CHECK-NEXT: vmov q5, q3
; CHECK-NEXT: vmov.u8 r0, q2[2]
; CHECK-NEXT: vmov.f32 s15, s19
; CHECK-NEXT: vmov.8 q5[11], r0
; CHECK-NEXT: vmov.u8 r0, q1[0]
; CHECK-NEXT: vmov.8 q4[0], r0
; CHECK-NEXT: vmov.u8 r0, q1[3]
; CHECK-NEXT: vmov.8 q4[1], r0
@@ -637,27 +664,29 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
; CHECK-NEXT: vmov.8 q4[5], r0
; CHECK-NEXT: vmov.u8 r0, q0[2]
; CHECK-NEXT: vmov.8 q4[6], r0
; CHECK-NEXT: vmov.u8 r0, q0[5]
; CHECK-NEXT: vmov.8 q4[7], r0
; CHECK-NEXT: vmov.u8 r0, q0[8]
; CHECK-NEXT: vmov.8 q5[8], r0
; CHECK-NEXT: vmov.8 q4[8], r0
; CHECK-NEXT: vmov.u8 r0, q0[11]
; CHECK-NEXT: vmov.8 q5[9], r0
; CHECK-NEXT: vmov.8 q4[9], r0
; CHECK-NEXT: vmov.u8 r0, q0[14]
; CHECK-NEXT: vmov.8 q5[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[1]
; CHECK-NEXT: vmov.8 q5[11], r0
; CHECK-NEXT: vmov.8 q4[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[4]
; CHECK-NEXT: vmov.f32 s14, s22
; CHECK-NEXT: vmov.8 q5[12], r0
; CHECK-NEXT: vmov.u8 r0, q2[7]
; CHECK-NEXT: vmov q6, q4
; CHECK-NEXT: vmov.8 q5[13], r0
; CHECK-NEXT: vmov.u8 r0, q2[10]
; CHECK-NEXT: vmov.8 q5[14], r0
; CHECK-NEXT: vmov.u8 r0, q2[13]
; CHECK-NEXT: vmov.8 q5[15], r0
; CHECK-NEXT: vmov.u8 r0, q0[5]
; CHECK-NEXT: vmov.8 q4[7], r0
; CHECK-NEXT: vmov.u8 r0, q1[2]
; CHECK-NEXT: vmov.f32 s18, s22
; CHECK-NEXT: vmov.u8 r0, q2[1]
; CHECK-NEXT: vmov.8 q6[11], r0
; CHECK-NEXT: vmov.f32 s19, s23
; CHECK-NEXT: vmov.f32 s18, s26
; CHECK-NEXT: vmov.u8 r0, q1[2]
; CHECK-NEXT: vadd.i8 q3, q4, q3
; CHECK-NEXT: vmov.8 q4[0], r0
; CHECK-NEXT: vmov.u8 r0, q1[5]
@@ -672,14 +701,6 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
; CHECK-NEXT: vmov.8 q4[5], r0
; CHECK-NEXT: vmov.u8 r0, q0[4]
; CHECK-NEXT: vmov.8 q4[6], r0
; CHECK-NEXT: vmov.u8 r0, q0[10]
; CHECK-NEXT: vmov.8 q1[8], r0
; CHECK-NEXT: vmov.u8 r0, q0[13]
; CHECK-NEXT: vmov.8 q1[9], r0
; CHECK-NEXT: vmov.u8 r0, q2[0]
; CHECK-NEXT: vmov.8 q1[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[3]
; CHECK-NEXT: vmov.8 q1[11], r0
; CHECK-NEXT: vmov.u8 r0, q2[6]
; CHECK-NEXT: vmov.8 q1[12], r0
; CHECK-NEXT: vmov.u8 r0, q2[9]
@@ -688,13 +709,21 @@ define void @vld3_v16i8(<48 x i8> *%src, <16 x i8> *%dst) {
; CHECK-NEXT: vmov.8 q1[14], r0
; CHECK-NEXT: vmov.u8 r0, q2[15]
; CHECK-NEXT: vmov.8 q1[15], r0
; CHECK-NEXT: vmov.u8 r0, q0[10]
; CHECK-NEXT: vmov.8 q5[8], r0
; CHECK-NEXT: vmov.u8 r0, q0[13]
; CHECK-NEXT: vmov.8 q5[9], r0
; CHECK-NEXT: vmov.u8 r0, q2[0]
; CHECK-NEXT: vmov.8 q5[10], r0
; CHECK-NEXT: vmov.u8 r0, q2[3]
; CHECK-NEXT: vmov.8 q5[11], r0
; CHECK-NEXT: vmov.u8 r0, q0[7]
; CHECK-NEXT: vmov.8 q4[7], r0
; CHECK-NEXT: vmov.f32 s18, s6
; CHECK-NEXT: vmov.f32 s18, s22
; CHECK-NEXT: vmov.f32 s19, s7
; CHECK-NEXT: vadd.i8 q0, q3, q4
; CHECK-NEXT: vstrw.32 q0, [r1]
; CHECK-NEXT: vpop {d8, d9, d10, d11}
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13}
; CHECK-NEXT: bx lr
entry:
%l1 = load <48 x i8>, <48 x i8>* %src, align 4
@@ -1092,46 +1121,47 @@ entry:
define void @vld3_v8f16(<24 x half> *%src, <8 x half> *%dst) {
; CHECK-LABEL: vld3_v8f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: vldrw.u32 q2, [r0, #16]
; CHECK-NEXT: .vsave {d8, d9, d10}
; CHECK-NEXT: vpush {d8, d9, d10}
; CHECK-NEXT: vldrw.u32 q0, [r0]
; CHECK-NEXT: vldrw.u32 q3, [r0, #32]
; CHECK-NEXT: vmov.f32 s5, s8
; CHECK-NEXT: vmovx.f16 s8, s8
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vins.f16 s3, s8
; CHECK-NEXT: vmovx.f16 s8, s11
; CHECK-NEXT: vmovx.f16 s18, s10
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vins.f16 s10, s8
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vldrw.u32 q3, [r0, #16]
; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
; CHECK-NEXT: vmov.f32 s4, s1
; CHECK-NEXT: vmovx.f16 s8, s14
; CHECK-NEXT: vmovx.f16 s19, s13
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s9
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s15
; CHECK-NEXT: vmovx.f16 s7, s12
; CHECK-NEXT: vins.f16 s18, s12
; CHECK-NEXT: vmovx.f16 s12, s1
; CHECK-NEXT: vins.f16 s13, s8
; CHECK-NEXT: vmov.f32 s5, s12
; CHECK-NEXT: vmovx.f16 s6, s13
; CHECK-NEXT: vmov.f32 s7, s10
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmov.f32 s6, s11
; CHECK-NEXT: vins.f16 s14, s2
; CHECK-NEXT: vmovx.f16 s6, s11
; CHECK-NEXT: vins.f16 s7, s6
; CHECK-NEXT: vmov.f32 s6, s15
; CHECK-NEXT: vmovx.f16 s16, s8
; CHECK-NEXT: vmovx.f16 s12, s12
; CHECK-NEXT: vmovx.f16 s15, s15
; CHECK-NEXT: vmov.f32 s20, s14
; CHECK-NEXT: vmovx.f16 s10, s10
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vmovx.f16 s19, s9
; CHECK-NEXT: vmovx.f16 s18, s14
; CHECK-NEXT: vins.f16 s6, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vmovx.f16 s1, s1
; CHECK-NEXT: vins.f16 s20, s15
; CHECK-NEXT: vins.f16 s3, s12
; CHECK-NEXT: vins.f16 s9, s10
; CHECK-NEXT: vins.f16 s0, s1
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s19, s15
; CHECK-NEXT: vins.f16 s17, s9
; CHECK-NEXT: vins.f16 s0, s12
; CHECK-NEXT: vmov.f32 s2, s10
; CHECK-NEXT: vmov.f32 s3, s13
; CHECK-NEXT: vins.f16 s6, s7
; CHECK-NEXT: vmov.f32 s7, s14
; CHECK-NEXT: vins.f16 s17, s13
; CHECK-NEXT: vins.f16 s19, s11
; CHECK-NEXT: vins.f16 s18, s8
; CHECK-NEXT: vmov.f32 s2, s20
; CHECK-NEXT: vmov.f32 s3, s9
; CHECK-NEXT: vadd.f16 q0, q0, q4
; CHECK-NEXT: vadd.f16 q0, q0, q1
; CHECK-NEXT: vstrw.32 q0, [r1]
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: vpop {d8, d9, d10}
; CHECK-NEXT: bx lr
entry:
%l1 = load <24 x half>, <24 x half>* %src, align 4
@@ -1147,83 +1177,85 @@ entry:
define void @vld3_v16f16(<48 x half> *%src, <16 x half> *%dst) {
; CHECK-LABEL: vld3_v16f16:
; CHECK: @ %bb.0: @ %entry
; CHECK-NEXT: .vsave {d8, d9}
; CHECK-NEXT: vpush {d8, d9}
; CHECK-NEXT: .vsave {d8, d9, d10}
; CHECK-NEXT: vpush {d8, d9, d10}
; CHECK-NEXT: vldrw.u32 q0, [r0, #48]
; CHECK-NEXT: vldrw.u32 q2, [r0, #64]
; CHECK-NEXT: vldrw.u32 q3, [r0, #80]
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vldrw.u32 q3, [r0, #64]
; CHECK-NEXT: vldrw.u32 q2, [r0, #80]
; CHECK-NEXT: vmov.f32 s4, s1
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s9
; CHECK-NEXT: vmov.f32 s5, s8
; CHECK-NEXT: vmovx.f16 s7, s12
; CHECK-NEXT: vmov.f32 s5, s12
; CHECK-NEXT: vmovx.f16 s6, s13
; CHECK-NEXT: vmov.f32 s7, s10
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmov.f32 s6, s11
; CHECK-NEXT: vins.f16 s6, s7
; CHECK-NEXT: vmovx.f16 s16, s15
; CHECK-NEXT: vmov.f32 s7, s14
; CHECK-NEXT: vmovx.f16 s6, s11
; CHECK-NEXT: vins.f16 s7, s6
; CHECK-NEXT: vmov.f32 s6, s15
; CHECK-NEXT: vmovx.f16 s16, s8
; CHECK-NEXT: vmovx.f16 s12, s12
; CHECK-NEXT: vmovx.f16 s15, s15
; CHECK-NEXT: vmov.f32 s20, s14
; CHECK-NEXT: vmovx.f16 s10, s10
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vins.f16 s7, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s1
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vmovx.f16 s2, s8
; CHECK-NEXT: vins.f16 s3, s2
; CHECK-NEXT: vmovx.f16 s2, s11
; CHECK-NEXT: vmovx.f16 s18, s10
; CHECK-NEXT: vins.f16 s10, s2
; CHECK-NEXT: vmovx.f16 s2, s14
; CHECK-NEXT: vmovx.f16 s19, s13
; CHECK-NEXT: vins.f16 s13, s2
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s18, s12
; CHECK-NEXT: vins.f16 s19, s15
; CHECK-NEXT: vmov.f32 s3, s13
; CHECK-NEXT: vins.f16 s17, s9
; CHECK-NEXT: vmov.f32 s2, s10
; CHECK-NEXT: vldrw.u32 q3, [r0, #16]
; CHECK-NEXT: vadd.f16 q0, q0, q4
; CHECK-NEXT: vadd.f16 q2, q0, q1
; CHECK-NEXT: vldrw.u32 q0, [r0]
; CHECK-NEXT: vldrw.u32 q1, [r0, #32]
; CHECK-NEXT: vstrw.32 q2, [r1, #16]
; CHECK-NEXT: vmovx.f16 s10, s2
; CHECK-NEXT: vmov.f32 s8, s1
; CHECK-NEXT: vins.f16 s8, s10
; CHECK-NEXT: vmovx.f16 s10, s13
; CHECK-NEXT: vmov.f32 s9, s12
; CHECK-NEXT: vmovx.f16 s11, s4
; CHECK-NEXT: vins.f16 s9, s10
; CHECK-NEXT: vmov.f32 s10, s15
; CHECK-NEXT: vins.f16 s10, s11
; CHECK-NEXT: vmovx.f16 s16, s7
; CHECK-NEXT: vmov.f32 s11, s6
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vins.f16 s11, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmovx.f16 s2, s1
; CHECK-NEXT: vins.f16 s0, s2
; CHECK-NEXT: vmovx.f16 s2, s12
; CHECK-NEXT: vins.f16 s3, s2
; CHECK-NEXT: vmovx.f16 s2, s15
; CHECK-NEXT: vmovx.f16 s19, s9
; CHECK-NEXT: vmovx.f16 s18, s14
; CHECK-NEXT: vins.f16 s14, s2
; CHECK-NEXT: vmovx.f16 s2, s6
; CHECK-NEXT: vmovx.f16 s19, s5
; CHECK-NEXT: vins.f16 s5, s2
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s18, s4
; CHECK-NEXT: vins.f16 s19, s7
; CHECK-NEXT: vins.f16 s6, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vmovx.f16 s1, s1
; CHECK-NEXT: vins.f16 s20, s15
; CHECK-NEXT: vins.f16 s3, s12
; CHECK-NEXT: vins.f16 s9, s10
; CHECK-NEXT: vins.f16 s0, s1
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vins.f16 s17, s13
; CHECK-NEXT: vmov.f32 s2, s14
; CHECK-NEXT: vmov.f32 s3, s5
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s19, s11
; CHECK-NEXT: vins.f16 s18, s8
; CHECK-NEXT: vmov.f32 s3, s9
; CHECK-NEXT: vldrw.u32 q3, [r0, #16]
; CHECK-NEXT: vmov.f32 s2, s20
; CHECK-NEXT: vldrw.u32 q2, [r0, #32]
; CHECK-NEXT: vadd.f16 q0, q0, q4
; CHECK-NEXT: vadd.f16 q0, q0, q2
; CHECK-NEXT: vmov.f32 s20, s14
; CHECK-NEXT: vadd.f16 q1, q0, q1
; CHECK-NEXT: vldrw.u32 q0, [r0]
; CHECK-NEXT: vstrw.32 q1, [r1, #16]
; CHECK-NEXT: vmov.f32 s5, s12
; CHECK-NEXT: vmov.f32 s4, s1
; CHECK-NEXT: vmovx.f16 s6, s2
; CHECK-NEXT: vins.f16 s4, s6
; CHECK-NEXT: vmovx.f16 s6, s13
; CHECK-NEXT: vins.f16 s5, s6
; CHECK-NEXT: vmov.f32 s7, s10
; CHECK-NEXT: vmovx.f16 s6, s11
; CHECK-NEXT: vmovx.f16 s16, s8
; CHECK-NEXT: vins.f16 s7, s6
; CHECK-NEXT: vmov.f32 s6, s15
; CHECK-NEXT: vmovx.f16 s15, s15
; CHECK-NEXT: vmovx.f16 s12, s12
; CHECK-NEXT: vmovx.f16 s10, s10
; CHECK-NEXT: vmovx.f16 s17, s3
; CHECK-NEXT: vmovx.f16 s19, s9
; CHECK-NEXT: vmovx.f16 s18, s14
; CHECK-NEXT: vins.f16 s6, s16
; CHECK-NEXT: vmovx.f16 s16, s0
; CHECK-NEXT: vmovx.f16 s1, s1
; CHECK-NEXT: vins.f16 s20, s15
; CHECK-NEXT: vins.f16 s3, s12
; CHECK-NEXT: vins.f16 s9, s10
; CHECK-NEXT: vins.f16 s0, s1
; CHECK-NEXT: vins.f16 s16, s2
; CHECK-NEXT: vmov.f32 s1, s3
; CHECK-NEXT: vins.f16 s17, s13
; CHECK-NEXT: vins.f16 s19, s11
; CHECK-NEXT: vins.f16 s18, s8
; CHECK-NEXT: vmov.f32 s2, s20
; CHECK-NEXT: vmov.f32 s3, s9
; CHECK-NEXT: vadd.f16 q0, q0, q4
; CHECK-NEXT: vadd.f16 q0, q0, q1
; CHECK-NEXT: vstrw.32 q0, [r1]
; CHECK-NEXT: vpop {d8, d9}
; CHECK-NEXT: vpop {d8, d9, d10}
; CHECK-NEXT: bx lr
entry:
%l1 = load <48 x half>, <48 x half>* %src, align 4

File diff suppressed because it is too large

@@ -757,42 +757,53 @@ define void @vst4_v4i64(<4 x i64> *%src, <16 x i64> *%dst) {
; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: .pad #64
; CHECK-NEXT: sub sp, #64
; CHECK-NEXT: vldrw.u32 q7, [r0, #80]
; CHECK-NEXT: vldrw.u32 q5, [r0, #32]
; CHECK-NEXT: vldrw.u32 q6, [r0]
; CHECK-NEXT: vldrw.u32 q1, [r0, #96]
; CHECK-NEXT: vstrw.32 q7, [sp, #32] @ 16-byte Spill
; CHECK-NEXT: vmov.f64 d15, d10
; CHECK-NEXT: vldrw.u32 q2, [r0, #64]
; CHECK-NEXT: vldrw.u32 q0, [r0, #32]
; CHECK-NEXT: vldrw.u32 q7, [r0]
; CHECK-NEXT: vldrw.u32 q2, [r0, #96]
; CHECK-NEXT: vldrw.u32 q3, [r0, #64]
; CHECK-NEXT: vmov.f32 s6, s0
; CHECK-NEXT: vldrw.u32 q5, [r0, #112]
; CHECK-NEXT: vmov.f32 s7, s1
; CHECK-NEXT: vldrw.u32 q4, [r0, #48]
; CHECK-NEXT: vmov.f64 d13, d1
; CHECK-NEXT: vldrw.u32 q0, [r0, #16]
; CHECK-NEXT: vldrw.u32 q3, [r0, #48]
; CHECK-NEXT: vldrw.u32 q4, [r0, #112]
; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill
; CHECK-NEXT: vmov.f64 d14, d12
; CHECK-NEXT: vstrw.32 q7, [sp, #48] @ 16-byte Spill
; CHECK-NEXT: vmov.f64 d14, d4
; CHECK-NEXT: vmov.f64 d15, d2
; CHECK-NEXT: vstrw.32 q7, [sp] @ 16-byte Spill
; CHECK-NEXT: vmov.f64 d4, d0
; CHECK-NEXT: vldrw.u32 q0, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: vldrw.u32 q0, [r0, #80]
; CHECK-NEXT: vmov.f32 s4, s28
; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill
; CHECK-NEXT: vmov.f32 s5, s29
; CHECK-NEXT: vmov.f32 s24, s30
; CHECK-NEXT: vstrw.32 q1, [sp] @ 16-byte Spill
; CHECK-NEXT: vmov.f32 s25, s31
; CHECK-NEXT: vldrw.u32 q7, [sp, #16] @ 16-byte Reload
; CHECK-NEXT: vmov.f64 d10, d13
; CHECK-NEXT: vmov.f64 d2, d5
; CHECK-NEXT: vstrw.32 q5, [r1, #32]
; CHECK-NEXT: vmov.f64 d5, d6
; CHECK-NEXT: vstrw.32 q1, [r1, #48]
; CHECK-NEXT: vmov.f64 d13, d8
; CHECK-NEXT: vstrw.32 q2, [r1, #64]
; CHECK-NEXT: vmov.f64 d12, d0
; CHECK-NEXT: vmov.f64 d8, d1
; CHECK-NEXT: vldrw.u32 q0, [sp, #48] @ 16-byte Reload
; CHECK-NEXT: vstrw.32 q6, [r1, #80]
; CHECK-NEXT: vstrw.32 q0, [r1]
; CHECK-NEXT: vmov.f32 s6, s8
; CHECK-NEXT: vstrw.32 q6, [sp, #48] @ 16-byte Spill
; CHECK-NEXT: vmov.f32 s7, s9
; CHECK-NEXT: vmov.f32 s4, s12
; CHECK-NEXT: vmov.f32 s5, s13
; CHECK-NEXT: vmov.f32 s8, s14
; CHECK-NEXT: vstrw.32 q1, [r1, #16]
; CHECK-NEXT: vmov.f32 s9, s15
; CHECK-NEXT: vldrw.u32 q3, [sp, #32] @ 16-byte Reload
; CHECK-NEXT: vmov.f64 d1, d15
; CHECK-NEXT: vstrw.32 q2, [r1, #48]
; CHECK-NEXT: vmov.f64 d13, d7
; CHECK-NEXT: vmov.f32 s14, s20
; CHECK-NEXT: vmov.f32 s15, s21
; CHECK-NEXT: vmov.f32 s30, s16
; CHECK-NEXT: vstrw.32 q3, [r1, #80]
; CHECK-NEXT: vmov.f32 s31, s17
; CHECK-NEXT: vldrw.u32 q3, [sp, #48] @ 16-byte Reload
; CHECK-NEXT: vmov.f32 s16, s2
; CHECK-NEXT: vstrw.32 q7, [r1, #64]
; CHECK-NEXT: vmov.f32 s17, s3
; CHECK-NEXT: vldrw.u32 q0, [sp] @ 16-byte Reload
; CHECK-NEXT: vmov.f64 d6, d15
; CHECK-NEXT: vstrw.32 q4, [r1, #112]
; CHECK-NEXT: vstrw.32 q0, [r1, #16]
; CHECK-NEXT: vstrw.32 q3, [r1, #96]
; CHECK-NEXT: vmov.f32 s20, s26
; CHECK-NEXT: vstrw.32 q4, [r1, #96]
; CHECK-NEXT: vmov.f32 s21, s27
; CHECK-NEXT: vstrw.32 q3, [r1, #32]
; CHECK-NEXT: vstrw.32 q5, [r1, #112]
; CHECK-NEXT: vstrw.32 q0, [r1]
; CHECK-NEXT: add sp, #64
; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15}
; CHECK-NEXT: bx lr

@@ -85,10 +85,12 @@ define <8 x float> @hadd_reverse2_v8f32(<8 x float> %a0, <8 x float> %a1) {
; SSE-LABEL: hadd_reverse2_v8f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: haddps %xmm3, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0,3,2]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,2],xmm0[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,2,1,0]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
; SSE-NEXT: haddps %xmm2, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0,3,2]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,2,1,0]
; SSE-NEXT: haddps %xmm3, %xmm1
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: movaps %xmm4, %xmm1
; SSE-NEXT: retq
@@ -276,10 +278,18 @@ define <8 x double> @hadd_reverse2_v8f64(<8 x double> %a0, <8 x double> %a1) nou
; SSE: # %bb.0:
; SSE-NEXT: movapd %xmm1, %xmm8
; SSE-NEXT: movapd %xmm0, %xmm9
; SSE-NEXT: haddpd %xmm7, %xmm3
; SSE-NEXT: haddpd %xmm6, %xmm2
; SSE-NEXT: haddpd %xmm5, %xmm8
; SSE-NEXT: shufpd {{.*#+}} xmm9 = xmm9[1],xmm0[0]
; SSE-NEXT: shufpd {{.*#+}} xmm8 = xmm8[1],xmm1[0]
; SSE-NEXT: shufpd {{.*#+}} xmm2 = xmm2[1,0]
; SSE-NEXT: shufpd {{.*#+}} xmm3 = xmm3[1,0]
; SSE-NEXT: shufpd {{.*#+}} xmm4 = xmm4[1,0]
; SSE-NEXT: haddpd %xmm4, %xmm9
; SSE-NEXT: shufpd {{.*#+}} xmm5 = xmm5[1,0]
; SSE-NEXT: haddpd %xmm5, %xmm8
; SSE-NEXT: shufpd {{.*#+}} xmm6 = xmm6[1,0]
; SSE-NEXT: haddpd %xmm6, %xmm2
; SSE-NEXT: shufpd {{.*#+}} xmm7 = xmm7[1,0]
; SSE-NEXT: haddpd %xmm7, %xmm3
; SSE-NEXT: movapd %xmm3, %xmm0
; SSE-NEXT: movapd %xmm2, %xmm1
; SSE-NEXT: movapd %xmm8, %xmm2
@@ -288,20 +298,26 @@ define <8 x double> @hadd_reverse2_v8f64(<8 x double> %a0, <8 x double> %a1) nou
;
; AVX1-LABEL: hadd_reverse2_v8f64:
; AVX1: # %bb.0:
; AVX1-NEXT: vhaddpd %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm1[2,3,0,1]
; AVX1-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX1-NEXT: vmovapd %ymm3, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = ymm1[1,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,3,2]
; AVX1-NEXT: vhaddpd %ymm1, %ymm0, %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,3,2]
; AVX1-NEXT: vhaddpd %ymm0, %ymm4, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_reverse2_v8f64:
; AVX2: # %bb.0:
; AVX2-NEXT: vhaddpd %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[2,3,0,1]
; AVX2-NEXT: vhaddpd %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovapd %ymm3, %ymm0
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[3,2,1,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm1[3,2,1,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[3,2,1,0]
; AVX2-NEXT: vhaddpd %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm3[3,2,1,0]
; AVX2-NEXT: vhaddpd %ymm0, %ymm4, %ymm0
; AVX2-NEXT: retq
%shuf0 = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
%shuf1 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
@@ -314,19 +330,19 @@ define <8 x double> @hadd_reverse2_v8f64(<8 x double> %a0, <8 x double> %a1) nou
define <16 x float> @hadd_reverse_v16f32(<16 x float> %a0, <16 x float> %a1) nounwind {
; SSE-LABEL: hadd_reverse_v16f32:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm4, %xmm8
; SSE-NEXT: movaps %xmm0, %xmm4
; SSE-NEXT: haddps %xmm3, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
; SSE-NEXT: haddps %xmm7, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
; SSE-NEXT: haddps %xmm1, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,2,1,0]
; SSE-NEXT: haddps %xmm5, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,2,1,0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: movaps %xmm6, %xmm1
; SSE-NEXT: movaps %xmm4, %xmm2
; SSE-NEXT: movaps %xmm5, %xmm8
; SSE-NEXT: movaps %xmm1, %xmm5
; SSE-NEXT: haddps %xmm2, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0,3,2]
; SSE-NEXT: haddps %xmm6, %xmm7
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0,3,2]
; SSE-NEXT: haddps %xmm0, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0,3,2]
; SSE-NEXT: haddps %xmm4, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0,3,2]
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: movaps %xmm7, %xmm1
; SSE-NEXT: movaps %xmm5, %xmm2
; SSE-NEXT: movaps %xmm8, %xmm3
; SSE-NEXT: retq
;
@@ -363,14 +379,18 @@ define <16 x float> @hadd_reverse2_v16f32(<16 x float> %a0, <16 x float> %a1) no
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm1, %xmm8
; SSE-NEXT: movaps %xmm0, %xmm9
; SSE-NEXT: haddps %xmm7, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0,3,2]
; SSE-NEXT: haddps %xmm6, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0,3,2]
; SSE-NEXT: haddps %xmm5, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0,3,2]
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,2],xmm0[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[3,2],xmm1[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,2,1,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,2,1,0]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,2,1,0]
; SSE-NEXT: haddps %xmm4, %xmm9
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,0,3,2]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,2,1,0]
; SSE-NEXT: haddps %xmm5, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,2,1,0]
; SSE-NEXT: haddps %xmm6, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,2,1,0]
; SSE-NEXT: haddps %xmm7, %xmm3
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: movaps %xmm8, %xmm2
@ -379,24 +399,30 @@ define <16 x float> @hadd_reverse2_v16f32(<16 x float> %a0, <16 x float> %a1) no
;
; AVX1-LABEL: hadd_reverse2_v16f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vhaddps %ymm3, %ymm1, %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = ymm1[1,0,3,2,5,4,7,6]
; AVX1-NEXT: vhaddps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm0[1,0,3,2,5,4,7,6]
; AVX1-NEXT: vmovaps %ymm3, %ymm0
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vhaddps %ymm1, %ymm0, %ymm1
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3,0,1]
; AVX1-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX1-NEXT: vhaddps %ymm0, %ymm4, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: hadd_reverse2_v16f32:
; AVX2: # %bb.0:
; AVX2-NEXT: vhaddps %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[1,0,3,2,5,4,7,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm1[2,3,0,1]
; AVX2-NEXT: vhaddps %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[1,0,3,2,5,4,7,6]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm0[2,3,0,1]
; AVX2-NEXT: vmovaps %ymm3, %ymm0
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm0[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm1[2,3,0,1]
; AVX2-NEXT: vpermilps {{.*#+}} ymm1 = ymm2[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[2,3,0,1]
; AVX2-NEXT: vhaddps %ymm1, %ymm0, %ymm1
; AVX2-NEXT: vpermilps {{.*#+}} ymm0 = ymm3[3,2,1,0,7,6,5,4]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,3,0,1]
; AVX2-NEXT: vhaddps %ymm0, %ymm4, %ymm0
; AVX2-NEXT: retq
%shuf0 = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
%shuf1 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>

@@ -11,7 +11,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
; SSE-32-NEXT: movl {{[0-9]+}}(%esp), %ecx
; SSE-32-NEXT: xorps %xmm0, %xmm0
; SSE-32-NEXT: xorps %xmm1, %xmm1
; SSE-32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],mem[0,0]
; SSE-32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,0]
; SSE-32-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSE-32-NEXT: movups %xmm0, 624(%eax)
; SSE-32-NEXT: movups %xmm1, 608(%eax)
@@ -21,7 +21,7 @@ define void @PR15298(<4 x float>* nocapture %source, <8 x float>* nocapture %des
; SSE-64: # %bb.0: # %L.entry
; SSE-64-NEXT: xorps %xmm0, %xmm0
; SSE-64-NEXT: xorps %xmm1, %xmm1
; SSE-64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],mem[0,0]
; SSE-64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],mem[0,0]
; SSE-64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2,3,1]
; SSE-64-NEXT: movups %xmm0, 624(%rsi)
; SSE-64-NEXT: movups %xmm1, 608(%rsi)

@@ -519,14 +519,15 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm3[0,2]
; SSE2-NEXT: movaps %xmm0, %xmm3
; SSE2-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm2[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1],xmm1[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
; SSE2-NEXT: movaps %xmm2, 16(%rdi)
; SSE2-NEXT: movaps %xmm2, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm3[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
; SSE2-NEXT: unpckhps {{.*#+}} xmm2 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm1[0,2]
; SSE2-NEXT: movaps %xmm2, 32(%rdi)
; SSE2-NEXT: movaps %xmm5, 16(%rdi)
; SSE2-NEXT: movaps %xmm4, (%rdi)
; SSE2-NEXT: movaps %xmm0, 32(%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: v12i32:
@@ -537,12 +538,14 @@ define void @v12i32(<8 x i32> %a, <8 x i32> %b, <12 x i32>* %p) nounwind {
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm4[0,1,2,3],xmm3[4,5],xmm4[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm1[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm2[2,3],xmm4[4,5,6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm1[3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
; SSE42-NEXT: movdqa %xmm1, 32(%rdi)
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm0[4,5],xmm4[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm0[2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: movdqa %xmm0, 32(%rdi)
; SSE42-NEXT: movdqa %xmm4, 16(%rdi)
; SSE42-NEXT: movdqa %xmm3, (%rdi)
; SSE42-NEXT: retq
@@ -1208,42 +1211,42 @@ define void @interleave_24i16_out_reverse(<24 x i16>* %p, <8 x i16>* %q1, <8 x i
define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2, <8 x i16>* %q3) nounwind {
; SSE2-LABEL: interleave_24i16_in:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqu (%rsi), %xmm0
; SSE2-NEXT: movdqu (%rsi), %xmm3
; SSE2-NEXT: movdqu (%rdx), %xmm2
; SSE2-NEXT: movdqu (%rcx), %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm4, %xmm5
; SSE2-NEXT: pandn %xmm1, %xmm5
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,5]
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: por %xmm5, %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[1,1,2,2]
; SSE2-NEXT: pand %xmm4, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm6 = xmm2[3,3,3,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,4,4]
; SSE2-NEXT: pandn %xmm6, %xmm4
; SSE2-NEXT: por %xmm5, %xmm4
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [0,65535,65535,0,65535,65535,0,65535]
; SSE2-NEXT: pand %xmm5, %xmm4
; SSE2-NEXT: movdqu (%rcx), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm1[0,0,0,0]
; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3]
; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,1]
; SSE2-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,2,2,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,5,4,5]
; SSE2-NEXT: pand %xmm0, %xmm3
; SSE2-NEXT: por %xmm5, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[3,3,3,3,4,5,6,7]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,3,2,0,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,5,5,6,7]
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: por %xmm5, %xmm2
; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2]
; SSE2-NEXT: pand %xmm5, %xmm1
; SSE2-NEXT: pandn %xmm6, %xmm5
; SSE2-NEXT: por %xmm4, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,0]
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,1,3,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,1,0,4,5,6,7]
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: pandn %xmm3, %xmm4
; SSE2-NEXT: por %xmm0, %xmm4
; SSE2-NEXT: movdqu %xmm4, 32(%rdi)
; SSE2-NEXT: movdqu %xmm5, 16(%rdi)
; SSE2-NEXT: movdqu %xmm1, (%rdi)
; SSE2-NEXT: por %xmm1, %xmm5
; SSE2-NEXT: pand %xmm0, %xmm5
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm4[0,1,2,3,4,4,4,4]
; SSE2-NEXT: pandn %xmm1, %xmm0
; SSE2-NEXT: por %xmm5, %xmm0
; SSE2-NEXT: movdqu %xmm0, 16(%rdi)
; SSE2-NEXT: movdqu %xmm2, 32(%rdi)
; SSE2-NEXT: movdqu %xmm3, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i16_in:
@@ -1252,23 +1255,22 @@ define void @interleave_24i16_in(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2,
; SSE42-NEXT: movdqu (%rdx), %xmm1
; SSE42-NEXT: movdqu (%rcx), %xmm2
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm0[1,1,2,2]
; SSE42-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[3,3,3,3,4,5,6,7]
; SSE42-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,4,4]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm3[0,1],xmm4[2],xmm3[3,4],xmm4[5],xmm3[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2],xmm3[3],xmm4[4,5],xmm3[6],xmm4[7]
; SSE42-NEXT: movdqa %xmm0, %xmm4
; SSE42-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; SSE42-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; SSE42-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE42-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,u,u,4,5,6,7,u,u,8,9,10,11]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,0,0,0]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1],xmm5[2],xmm4[3,4],xmm5[5],xmm4[6,7]
; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,u,u,10,11,8,9,u,u,14,15,12,13,u,u]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2,3],xmm0[4],xmm1[5,6],xmm0[7]
; SSE42-NEXT: movdqu %xmm0, 32(%rdi)
; SSE42-NEXT: movdqu %xmm5, (%rdi)
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm0[0,1],xmm5[2],xmm0[3,4],xmm5[5],xmm0[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm2[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm3[1],xmm0[2,3],xmm3[4],xmm0[5,6],xmm3[7]
; SSE42-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[3,3,3,3,4,5,6,7]
; SSE42-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm0[0,1],xmm3[2],xmm0[3,4],xmm3[5],xmm0[6,7]
; SSE42-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE42-NEXT: pshufb {{.*#+}} xmm1 = xmm1[4,5,6,7,u,u,8,9,10,11,u,u,12,13,14,15]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0,1],xmm4[2],xmm1[3,4],xmm4[5],xmm1[6,7]
; SSE42-NEXT: movdqu %xmm4, 32(%rdi)
; SSE42-NEXT: movdqu %xmm3, 16(%rdi)
; SSE42-NEXT: movdqu %xmm5, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i16_in:
@@ -1404,7 +1406,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm10[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1]
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm5[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm10[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,1],xmm10[2,3]
; SSE2-NEXT: movdqa %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm5[2,0]
; SSE2-NEXT: movaps %xmm8, %xmm5
@@ -1412,7 +1414,7 @@ define void @interleave_24i32_out(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm9[1,1,1,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[0,3]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm9[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm9[2,3]
; SSE2-NEXT: movdqa %xmm3, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm8[2,0]
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,1],xmm9[3,3]
@@ -1631,109 +1633,114 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; SSE2: # %bb.0:
; SSE2-NEXT: movups (%rsi), %xmm1
; SSE2-NEXT: movups 16(%rsi), %xmm0
; SSE2-NEXT: movups (%rdx), %xmm2
; SSE2-NEXT: movups (%rdx), %xmm8
; SSE2-NEXT: movups 16(%rdx), %xmm5
; SSE2-NEXT: movups (%rcx), %xmm8
; SSE2-NEXT: movups 16(%rcx), %xmm9
; SSE2-NEXT: movaps %xmm8, %xmm7
; SSE2-NEXT: movups (%rcx), %xmm3
; SSE2-NEXT: movups 16(%rcx), %xmm6
; SSE2-NEXT: movaps %xmm3, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[1,3]
; SSE2-NEXT: movaps %xmm1, %xmm3
; SSE2-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm7[0,2]
; SSE2-NEXT: movaps %xmm1, %xmm9
; SSE2-NEXT: unpcklps {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm7[0,2]
; SSE2-NEXT: movaps %xmm5, %xmm7
; SSE2-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,3],xmm6[3,3]
; SSE2-NEXT: movaps %xmm6, %xmm4
; SSE2-NEXT: unpckhps {{.*#+}} xmm4 = xmm4[2],xmm0[2],xmm4[3],xmm0[3]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm7[0,2]
; SSE2-NEXT: movaps %xmm0, %xmm7
; SSE2-NEXT: unpckhpd {{.*#+}} xmm7 = xmm7[1],xmm5[1]
; SSE2-NEXT: movaps %xmm9, %xmm6
; SSE2-NEXT: movaps %xmm6, %xmm2
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm5[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm0[1,3]
; SSE2-NEXT: movaps %xmm0, %xmm4
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm5[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,2],xmm9[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[1,1],xmm5[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm9 = xmm9[2,0],xmm7[0,2]
; SSE2-NEXT: unpcklps {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm6[0,2]
; SSE2-NEXT: movaps %xmm1, %xmm5
; SSE2-NEXT: unpckhpd {{.*#+}} xmm5 = xmm5[1],xmm2[1]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,3],xmm2[3,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,2],xmm8[2,3]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,1],xmm2[1,1]
; SSE2-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm5[0,2]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0,1,3]
; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0,1,3]
; SSE2-NEXT: movups %xmm8, 16(%rdi)
; SSE2-NEXT: movups %xmm4, 48(%rdi)
; SSE2-NEXT: movups %xmm9, 64(%rdi)
; SSE2-NEXT: movups %xmm3, (%rdi)
; SSE2-NEXT: movups %xmm1, 32(%rdi)
; SSE2-NEXT: movups %xmm0, 80(%rdi)
; SSE2-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1]
; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0,2]
; SSE2-NEXT: movaps %xmm8, %xmm5
; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,3],xmm3[3,3]
; SSE2-NEXT: movaps %xmm3, %xmm6
; SSE2-NEXT: unpckhps {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
; SSE2-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm5[0,2]
; SSE2-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm8[1]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm8[1,0]
; SSE2-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[0,2]
; SSE2-NEXT: movups %xmm3, 16(%rdi)
; SSE2-NEXT: movups %xmm6, 32(%rdi)
; SSE2-NEXT: movups %xmm0, 48(%rdi)
; SSE2-NEXT: movups %xmm2, 64(%rdi)
; SSE2-NEXT: movups %xmm4, 80(%rdi)
; SSE2-NEXT: movups %xmm9, (%rdi)
; SSE2-NEXT: retq
;
; SSE42-LABEL: interleave_24i32_in:
; SSE42: # %bb.0:
; SSE42-NEXT: movdqu (%rsi), %xmm0
; SSE42-NEXT: movdqu (%rsi), %xmm8
; SSE42-NEXT: movdqu 16(%rsi), %xmm4
; SSE42-NEXT: movdqu (%rdx), %xmm9
; SSE42-NEXT: movdqu (%rdx), %xmm2
; SSE42-NEXT: movdqu 16(%rdx), %xmm5
; SSE42-NEXT: movdqu (%rcx), %xmm3
; SSE42-NEXT: movdqu 16(%rcx), %xmm6
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,0,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm8 = xmm3[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm8 = xmm7[0,1,2,3],xmm8[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1,2,3],xmm4[4,5],xmm7[6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm7[0,1],xmm6[2,3],xmm7[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm6[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5],xmm2[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm9[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm1 = xmm7[0,1,2,3],xmm1[4,5],xmm7[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm4[2,3,2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm7[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm7 = xmm0[0,1,2,3],xmm7[4,5],xmm0[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm6[2,3],xmm0[4,5,6,7]
; SSE42-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm4[4,5],xmm0[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,1,1]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1],xmm5[2,3],xmm4[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[0,1,0,1]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm4[0,1,2,3],xmm5[4,5],xmm4[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,2,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm6 = xmm6[0,1],xmm4[2,3],xmm6[4,5,6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,2,3,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm4 = xmm6[0,1,2,3],xmm4[4,5],xmm6[6,7]
; SSE42-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,2,2]
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm3[2,3],xmm2[4,5,6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,3],xmm5[3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm5 = xmm6[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm5 = xmm5[0,1],xmm4[2,3,4,5],xmm5[6,7]
; SSE42-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,3],xmm9[3,3]
; SSE42-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,3]
; SSE42-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,3,4,5],xmm3[6,7]
; SSE42-NEXT: movdqu %xmm3, 32(%rdi)
; SSE42-NEXT: movdqu %xmm5, 80(%rdi)
; SSE42-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5],xmm2[6,7]
; SSE42-NEXT: movdqu %xmm2, 16(%rdi)
; SSE42-NEXT: movdqu %xmm1, 48(%rdi)
; SSE42-NEXT: movdqu %xmm7, 64(%rdi)
; SSE42-NEXT: movdqu %xmm8, (%rdi)
; SSE42-NEXT: movdqu %xmm4, 32(%rdi)
; SSE42-NEXT: movdqu %xmm5, 48(%rdi)
; SSE42-NEXT: movdqu %xmm0, 64(%rdi)
; SSE42-NEXT: movdqu %xmm7, 80(%rdi)
; SSE42-NEXT: movdqu %xmm1, (%rdi)
; SSE42-NEXT: retq
;
; AVX1-LABEL: interleave_24i32_in:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovupd (%rcx), %ymm0
; AVX1-NEXT: vmovups (%rdx), %xmm1
; AVX1-NEXT: vmovups (%rdx), %xmm0
; AVX1-NEXT: vmovups (%rsi), %xmm1
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm1[1],xmm0[1]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm0[1,1],xmm2[0,2]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[2,0],xmm1[2,1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vbroadcastsd (%rcx), %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX1-NEXT: vmovups 16(%rcx), %xmm1
; AVX1-NEXT: vmovups 16(%rdx), %xmm2
; AVX1-NEXT: vmovups (%rsi), %xmm3
; AVX1-NEXT: vmovups 16(%rsi), %xmm4
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm4[3,3],xmm2[3,3]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm4[1],xmm2[1]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,1],xmm4[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3,2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = ymm4[0,0,3,3]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm4[1],ymm2[2,3],ymm4[4],ymm2[5,6],ymm4[7]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm1[1,1],xmm4[0,2]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm3[2,1]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: vbroadcastsd (%rcx), %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm3[2],ymm1[3,4],ymm3[5],ymm1[6,7]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm2[3,0],xmm1[3,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm1[2,1],xmm3[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[1,0]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,2]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = mem[1,1,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2,3],ymm2[4],ymm3[5,6],ymm2[7]
; AVX1-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1,2],ymm0[3],ymm3[4,5],ymm0[6],ymm3[7]
; AVX1-NEXT: vmovups %ymm0, 32(%rdi)
; AVX1-NEXT: vmovups %ymm1, (%rdi)
; AVX1-NEXT: vmovups %ymm2, 64(%rdi)
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; AVX1-NEXT: vmovups %ymm2, 32(%rdi)
; AVX1-NEXT: vmovups %ymm1, 64(%rdi)
; AVX1-NEXT: vmovups %ymm0, (%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1742,26 +1749,26 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX2-SLOW-NEXT: vmovups (%rsi), %ymm0
; AVX2-SLOW-NEXT: vmovups (%rdx), %ymm1
; AVX2-SLOW-NEXT: vmovups (%rcx), %ymm2
; AVX2-SLOW-NEXT: vbroadcastsd 24(%rsi), %ymm3
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-SLOW-NEXT: vbroadcastsd (%rcx), %ymm4
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-SLOW-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-SLOW-NEXT: vbroadcastsd 24(%rsi), %ymm5
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-SLOW-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
; AVX2-SLOW-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; AVX2-SLOW-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm4, (%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm4, 64(%rdi)
; AVX2-SLOW-NEXT: vmovups %ymm3, (%rdi)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
@@ -1770,27 +1777,27 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX2-FAST-ALL-NEXT: vmovups (%rsi), %ymm0
; AVX2-FAST-ALL-NEXT: vmovups (%rdx), %ymm1
; AVX2-FAST-ALL-NEXT: vmovups (%rcx), %ymm2
; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm3 = <5,u,u,6,u,u,7,u>
; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm3 = [1,0,2,2,1,0,2,2]
; AVX2-FAST-ALL-NEXT: # ymm3 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm3, %ymm3
; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rsi), %ymm4
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rcx), %ymm4
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-FAST-ALL-NEXT: vbroadcastf128 {{.*#+}} ymm4 = [1,0,2,2,1,0,2,2]
; AVX2-FAST-ALL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX2-FAST-ALL-NEXT: vmovaps {{.*#+}} ymm4 = <5,u,u,6,u,u,7,u>
; AVX2-FAST-ALL-NEXT: vpermps %ymm1, %ymm4, %ymm4
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-FAST-ALL-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm5 = ymm2[2,1,3,3]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2,3],ymm5[4],ymm4[5,6],ymm5[7]
; AVX2-FAST-ALL-NEXT: vbroadcastsd 24(%rsi), %ymm5
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-FAST-ALL-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-ALL-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
; AVX2-FAST-ALL-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; AVX2-FAST-ALL-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm4, (%rdi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm4, 64(%rdi)
; AVX2-FAST-ALL-NEXT: vmovups %ymm3, (%rdi)
; AVX2-FAST-ALL-NEXT: vzeroupper
; AVX2-FAST-ALL-NEXT: retq
;
@@ -1799,58 +1806,57 @@ define void @interleave_24i32_in(<24 x i32>* %p, <8 x i32>* %q1, <8 x i32>* %q2,
; AVX2-FAST-PERLANE-NEXT: vmovups (%rsi), %ymm0
; AVX2-FAST-PERLANE-NEXT: vmovups (%rdx), %ymm1
; AVX2-FAST-PERLANE-NEXT: vmovups (%rcx), %ymm2
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rsi), %ymm3
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm4 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,2,2,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1],ymm3[2],ymm4[3,4],ymm3[5],ymm4[6,7]
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm3 = mem[1,0,2,2]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,0,1]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,0,2,1]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2,3],ymm3[4],ymm4[5,6],ymm3[7]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rcx), %ymm4
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3,4],ymm4[5],ymm3[6,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm2[2,1,3,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2,3],ymm4[4],ymm3[5,6],ymm4[7]
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} xmm4 = mem[1,0,2,2]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,0,1]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,0,2,1]
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm5 = ymm1[1,2,3,3,5,6,7,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,2,2,3]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0],ymm4[1],ymm5[2,3],ymm4[4],ymm5[5,6],ymm4[7]
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd (%rcx), %ymm5
; AVX2-FAST-PERLANE-NEXT: vbroadcastsd 24(%rsi), %ymm5
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2],ymm4[3,4],ymm5[5],ymm4[6,7]
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2,3],ymm0[4],ymm2[5,6],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vpermilps {{.*#+}} ymm1 = ymm1[0,0,3,3,4,4,7,7]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2],ymm0[3,4],ymm1[5],ymm0[6,7]
; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm1 = ymm2[1,1,2,2]
; AVX2-FAST-PERLANE-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm0, 32(%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, (%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, 64(%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm4, 64(%rdi)
; AVX2-FAST-PERLANE-NEXT: vmovups %ymm3, (%rdi)
; AVX2-FAST-PERLANE-NEXT: vzeroupper
; AVX2-FAST-PERLANE-NEXT: retq
;
; XOP-LABEL: interleave_24i32_in:
; XOP: # %bb.0:
; XOP-NEXT: vmovups (%rsi), %ymm0
; XOP-NEXT: vmovups (%rdx), %ymm1
; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm0[u,3],ymm1[3],ymm0[u,4],ymm1[4],ymm0[u,5]
; XOP-NEXT: vmovups (%rcx), %ymm1
; XOP-NEXT: vmovups (%rdx), %xmm2
; XOP-NEXT: vpermil2ps {{.*#+}} ymm0 = ymm1[2],ymm0[3],ymm1[u,3],ymm0[4],ymm1[u,4],ymm0[5]
; XOP-NEXT: vmovups (%rdx), %xmm1
; XOP-NEXT: vmovups (%rsi), %xmm2
; XOP-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm2[1],xmm1[1]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm1[1,1],xmm3[0,2]
; XOP-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; XOP-NEXT: vshufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,1]
; XOP-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
; XOP-NEXT: vbroadcastsd (%rcx), %ymm2
; XOP-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm2[2],ymm1[3,4],ymm2[5],ymm1[6,7]
; XOP-NEXT: vmovups 16(%rcx), %xmm2
; XOP-NEXT: vmovups 16(%rdx), %xmm3
; XOP-NEXT: vmovups (%rsi), %xmm4
; XOP-NEXT: vmovups 16(%rsi), %xmm5
; XOP-NEXT: vshufps {{.*#+}} xmm6 = xmm5[3,3],xmm3[3,3]
; XOP-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm5[1],xmm3[1]
; XOP-NEXT: vshufps {{.*#+}} xmm3 = xmm3[1,1],xmm5[0,2]
; XOP-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3
; XOP-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm1[2,3,2,3]
; XOP-NEXT: vpermilpd {{.*#+}} ymm5 = ymm5[0,0,3,3]
; XOP-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0],ymm5[1],ymm3[2,3],ymm5[4],ymm3[5,6],ymm5[7]
; XOP-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm2[1]
; XOP-NEXT: vshufps {{.*#+}} xmm5 = xmm2[1,1],xmm5[0,2]
; XOP-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm4[2,1]
; XOP-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; XOP-NEXT: vbroadcastsd (%rcx), %ymm4
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm4[2],ymm2[3,4],ymm4[5],ymm2[6,7]
; XOP-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,1,2,2]
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0],ymm0[1,2],ymm1[3],ymm0[4,5],ymm1[6],ymm0[7]
; XOP-NEXT: vshufps {{.*#+}} xmm4 = xmm3[3,0],xmm2[3,0]
; XOP-NEXT: vshufps {{.*#+}} xmm4 = xmm2[2,1],xmm4[0,2]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[1,0]
; XOP-NEXT: vshufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[2,2]
; XOP-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; XOP-NEXT: vbroadcastsd 24(%rsi), %ymm3
; XOP-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2],ymm2[3,4],ymm3[5],ymm2[6,7]
; XOP-NEXT: vpermilps {{.*#+}} ymm3 = mem[0,0,3,3,4,4,7,7]
; XOP-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2],ymm0[3,4],ymm3[5],ymm0[6,7]
; XOP-NEXT: vmovups %ymm0, 32(%rdi)
; XOP-NEXT: vmovups %ymm2, (%rdi)
; XOP-NEXT: vmovups %ymm3, 64(%rdi)
; XOP-NEXT: vmovups %ymm2, 64(%rdi)
; XOP-NEXT: vmovups %ymm1, (%rdi)
; XOP-NEXT: vzeroupper
; XOP-NEXT: retq
%s1 = load <8 x i32>, <8 x i32>* %q1, align 4

@@ -14,34 +14,39 @@ define <16 x i64> @pluto(<16 x i64> %arg, <16 x i64> %arg1, <16 x i64> %arg2, <1
; CHECK-NEXT: vmovaps %ymm4, %ymm10
; CHECK-NEXT: vmovaps %ymm3, %ymm9
; CHECK-NEXT: vmovaps %ymm1, %ymm8
; CHECK-NEXT: vmovaps 240(%rbp), %ymm4
; CHECK-NEXT: vmovaps %ymm0, %ymm4
; CHECK-NEXT: vmovaps 240(%rbp), %ymm1
; CHECK-NEXT: vmovaps 208(%rbp), %ymm3
; CHECK-NEXT: vmovaps 176(%rbp), %ymm1
; CHECK-NEXT: vmovaps 144(%rbp), %ymm1
; CHECK-NEXT: vmovaps 176(%rbp), %ymm0
; CHECK-NEXT: vmovaps 144(%rbp), %ymm0
; CHECK-NEXT: vmovaps 112(%rbp), %ymm11
; CHECK-NEXT: vmovaps 80(%rbp), %ymm11
; CHECK-NEXT: vmovaps 48(%rbp), %ymm11
; CHECK-NEXT: vmovaps 16(%rbp), %ymm11
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm6[0,1,2,3,4,5],ymm2[6,7]
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[2],ymm3[2]
; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,1,3]
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[3,1,2,1]
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3,4,5],ymm0[6,7]
; CHECK-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm7[2,3],ymm6[0,1]
; CHECK-NEXT: vxorps %xmm2, %xmm2, %xmm2
; CHECK-NEXT: vpblendd {{.*#+}} ymm2 = ymm1[0,1],ymm2[2,3],ymm1[4,5,6,7]
; CHECK-NEXT: vpunpcklqdq {{.*#+}} ymm1 = ymm7[0],ymm5[0],ymm7[2],ymm5[2]
; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,2,3]
; CHECK-NEXT: vpermq {{.*#+}} ymm4 = ymm4[1,1,1,1]
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1],ymm4[2,3,4,5],ymm1[6,7]
; CHECK-NEXT: vmovaps %xmm3, %xmm4
; CHECK-NEXT: vmovaps %xmm7, %xmm3
; CHECK-NEXT: vpblendd {{.*#+}} xmm4 = xmm3[0,1],xmm4[2,3]
; CHECK-NEXT: # implicit-def: $ymm3
; CHECK-NEXT: vmovaps %xmm4, %xmm3
; CHECK-NEXT: vpermq {{.*#+}} ymm4 = ymm3[0,0,1,3]
; CHECK-NEXT: vpslldq {{.*#+}} ymm3 = zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,1,2,3,4,5,6,7],zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17,18,19,20,21,22,23]
; CHECK-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3,4,5],ymm3[6,7]
; CHECK-NEXT: vpblendd {{.*#+}} ymm4 = ymm6[0,1,2,3,4,5],ymm2[6,7]
; CHECK-NEXT: vmovaps %xmm3, %xmm6
; CHECK-NEXT: # implicit-def: $ymm2
; CHECK-NEXT: vinserti128 $1, %xmm6, %ymm2, %ymm2
; CHECK-NEXT: vpalignr {{.*#+}} ymm0 = ymm4[8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7],ymm4[24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23]
; CHECK-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,2,0]
; CHECK-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
; CHECK-NEXT: vextracti128 $1, %ymm7, %xmm2
; CHECK-NEXT: vmovq {{.*#+}} xmm6 = xmm2[0],zero
; CHECK-NEXT: # implicit-def: $ymm2
; CHECK-NEXT: vmovaps %xmm6, %xmm2
; CHECK-NEXT: # kill: def $xmm4 killed $xmm4 killed $ymm4
; CHECK-NEXT: vinserti128 $1, %xmm4, %ymm2, %ymm2
; CHECK-NEXT: vmovaps %xmm7, %xmm4
; CHECK-NEXT: vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,xmm4[0,1,2,3,4,5,6,7]
; CHECK-NEXT: # implicit-def: $ymm4
; CHECK-NEXT: vmovaps %xmm6, %xmm4
; CHECK-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7],ymm3[24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23]
; CHECK-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,1,0,3]
; CHECK-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm7[0,1],ymm1[2,3],ymm7[4,5,6,7]
; CHECK-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,1,1,3]
; CHECK-NEXT: vpshufd {{.*#+}} ymm4 = ymm5[0,1,0,1,4,5,4,5]
; CHECK-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
; CHECK-NEXT: movq %rbp, %rsp
; CHECK-NEXT: popq %rbp
; CHECK-NEXT: .cfi_def_cfa %rsp, 8

@ -12,75 +12,57 @@ define <3 x i32> @f_29(<12 x i16> %a, <12 x i16> %b) {
; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; CHECK-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; CHECK-NEXT: movd %r9d, %xmm1
; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; CHECK-NEXT: movd %r9d, %xmm0
; CHECK-NEXT: movd %r8d, %xmm3
; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; CHECK-NEXT: movd %ecx, %xmm1
; CHECK-NEXT: movd %ecx, %xmm0
; CHECK-NEXT: movd %edx, %xmm2
; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; CHECK-NEXT: movd %esi, %xmm4
; CHECK-NEXT: movd %edi, %xmm1
; CHECK-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; CHECK-NEXT: movd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; CHECK-NEXT: pinsrw $1, {{[0-9]+}}(%rsp), %xmm4
; CHECK-NEXT: pinsrw $2, {{[0-9]+}}(%rsp), %xmm4
; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%rsp), %xmm4
; CHECK-NEXT: movd %edi, %xmm0
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm3[0]
; CHECK-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
; CHECK-NEXT: pinsrw $1, {{[0-9]+}}(%rsp), %xmm2
; CHECK-NEXT: pinsrw $2, {{[0-9]+}}(%rsp), %xmm2
; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%rsp), %xmm2
; CHECK-NEXT: movdqa %xmm1, %xmm3
; CHECK-NEXT: pmulhuw %xmm0, %xmm3
; CHECK-NEXT: pmullw %xmm0, %xmm1
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,2,3,3]
; CHECK-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,3,2,3]
; CHECK-NEXT: movdqa %xmm2, %xmm7
; CHECK-NEXT: pmulhuw %xmm4, %xmm7
; CHECK-NEXT: pshuflw {{.*#+}} xmm3 = xmm7[0,0,2,1,4,5,6,7]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm3[0]
; CHECK-NEXT: movdqa {{.*#+}} xmm3 = [65535,0,65535,0,65535,0,65535,0]
; CHECK-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4,4,5,5,6,6,7,7]
; CHECK-NEXT: pmullw %xmm4, %xmm2
; CHECK-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,1,3,4,5,6,7]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
; CHECK-NEXT: pand %xmm3, %xmm5
; CHECK-NEXT: pandn %xmm6, %xmm3
; CHECK-NEXT: por %xmm5, %xmm3
; CHECK-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm7[0],xmm2[1],xmm7[1],xmm2[2],xmm7[2],xmm2[3],xmm7[3]
; CHECK-NEXT: movdqa %xmm3, %xmm4
; CHECK-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1],xmm1[1,1]
; CHECK-NEXT: movdqa %xmm0, %xmm5
; CHECK-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm4[2,0]
; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[3,3,3,3]
; CHECK-NEXT: pshufd {{.*#+}} xmm6 = xmm2[3,3,3,3]
; CHECK-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
; CHECK-NEXT: movdqa %xmm3, %xmm6
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm1[0]
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm6[2,0]
; CHECK-NEXT: paddd %xmm5, %xmm0
; CHECK-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; CHECK-NEXT: paddd %xmm4, %xmm3
; CHECK-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
; CHECK-NEXT: pinsrw $1, {{[0-9]+}}(%rsp), %xmm3
; CHECK-NEXT: pinsrw $2, {{[0-9]+}}(%rsp), %xmm3
; CHECK-NEXT: pinsrw $3, {{[0-9]+}}(%rsp), %xmm3
; CHECK-NEXT: movdqa %xmm0, %xmm4
; CHECK-NEXT: pmulhuw %xmm1, %xmm4
; CHECK-NEXT: pmullw %xmm1, %xmm0
; CHECK-NEXT: movdqa %xmm0, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,3]
; CHECK-NEXT: paddd %xmm1, %xmm0
; CHECK-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; CHECK-NEXT: movdqa %xmm3, %xmm4
; CHECK-NEXT: pmulhuw %xmm2, %xmm4
; CHECK-NEXT: pmullw %xmm2, %xmm3
; CHECK-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3]
; CHECK-NEXT: movdqa %xmm0, %xmm2
; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,3,1,3]
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; CHECK-NEXT: paddd %xmm2, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,2,1,3]
; CHECK-NEXT: paddd %xmm4, %xmm1
; CHECK-NEXT: movdqa %xmm0, %xmm2
; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,3]
; CHECK-NEXT: paddd %xmm2, %xmm0
; CHECK-NEXT: retq
entry:
%a32 = zext <12 x i16> %a to <12 x i32>

@ -385,13 +385,27 @@ define <32 x i8> @splat_v32i8_pgso(<32 x i8> %x) !prof !14 {
@A = common dso_local global <3 x i64> zeroinitializer, align 32
define <8 x i64> @pr23259() #1 {
; CHECK-LABEL: pr23259:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmovaps A+16(%rip), %xmm0
; CHECK-NEXT: vblendps {{.*#+}} xmm0 = xmm0[0,1],mem[2,3]
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
; CHECK-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; CHECK-NEXT: retq
; AVX-LABEL: pr23259:
; AVX: # %bb.0: # %entry
; AVX-NEXT: pushq $1
; AVX-NEXT: popq %rax
; AVX-NEXT: vmovq %rax, %xmm0
; AVX-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX-NEXT: vperm2f128 {{.*#+}} ymm0 = mem[2,3],ymm0[0,1]
; AVX-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX-NEXT: retq
;
; AVX2-LABEL: pr23259:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa A(%rip), %ymm0
; AVX2-NEXT: pushq $1
; AVX2-NEXT: popq %rax
; AVX2-NEXT: vmovq %rax, %xmm1
; AVX2-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,1,1,1]
; AVX2-NEXT: vbroadcastsd {{.*#+}} ymm1 = [1,1,1,1]
; AVX2-NEXT: retq
entry:
%0 = load <4 x i64>, <4 x i64>* bitcast (<3 x i64>* @A to <4 x i64>*), align 32
%1 = shufflevector <4 x i64> %0, <4 x i64> undef, <3 x i32> <i32 undef, i32 undef, i32 2>

@ -5,9 +5,10 @@
define <4 x i64> @autogen_SD88863() {
; CHECK-LABEL: autogen_SD88863:
; CHECK: # %bb.0: # %BB
; CHECK-NEXT: vperm2f128 {{.*#+}} ymm0 = zero,zero,ymm0[0,1]
; CHECK-NEXT: vxorpd %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[3],ymm1[3]
; CHECK-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[2,3,2,3]
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
; CHECK-NEXT: vxorps %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: .p2align 4, 0x90
; CHECK-NEXT: .LBB0_1: # %CF

@ -131,55 +131,58 @@ define void @vf2(<10 x i16>* %in.vec, <2 x i16>* %out.vec0, <2 x i16>* %out.vec1
define void @vf4(<20 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1, <4 x i16>* %out.vec2, <4 x i16>* %out.vec3, <4 x i16>* %out.vec4) nounwind {
; SSE-LABEL: vf4:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa 16(%rdi), %xmm3
; SSE-NEXT: movdqa (%rdi), %xmm3
; SSE-NEXT: movdqa 16(%rdi), %xmm2
; SSE-NEXT: movdqa 32(%rdi), %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: psrlq $48, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm4[0,3,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
; SSE-NEXT: movdqa %xmm0, %xmm9
; SSE-NEXT: psllq $48, %xmm9
; SSE-NEXT: movdqa {{.*#+}} xmm10 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: psrlq $48, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm3[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[0,3,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[0,1,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,2,0,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm7[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,3,3,4,5,6,7]
; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,2,2,3]
; SSE-NEXT: movdqa {{.*#+}} xmm7 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[3,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm2[0,2]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm3[0,1,2,3,4,6,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE-NEXT: pand %xmm10, %xmm5
; SSE-NEXT: movdqa %xmm10, %xmm11
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[0,1,1,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm2[2],xmm7[3],xmm2[3]
; SSE-NEXT: pand %xmm10, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1]
; SSE-NEXT: movdqa %xmm10, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm2[3,0]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0,2]
; SSE-NEXT: movdqa %xmm10, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm10, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm7, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm7
; SSE-NEXT: por %xmm2, %xmm7
; SSE-NEXT: movq %xmm1, (%rsi)
; SSE-NEXT: movq %xmm4, (%rdx)
; SSE-NEXT: movq %xmm5, (%rcx)
; SSE-NEXT: movq %xmm6, (%r8)
; SSE-NEXT: movq %xmm7, (%r9)
; SSE-NEXT: pand %xmm10, %xmm2
; SSE-NEXT: pandn %xmm9, %xmm10
; SSE-NEXT: por %xmm10, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,1,1]
; SSE-NEXT: pandn %xmm6, %xmm11
; SSE-NEXT: por %xmm11, %xmm7
; SSE-NEXT: pandn %xmm0, %xmm4
; SSE-NEXT: pslld $16, %xmm0
; SSE-NEXT: pandn %xmm0, %xmm3
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: movq %xmm8, (%rsi)
; SSE-NEXT: movq %xmm5, (%rdx)
; SSE-NEXT: movq %xmm7, (%rcx)
; SSE-NEXT: movq %xmm1, (%r8)
; SSE-NEXT: movq %xmm2, (%r9)
; SSE-NEXT: retq
;
; AVX1-LABEL: vf4:

@ -167,24 +167,29 @@ define void @vf4(<24 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa 32(%rdi), %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm4[0,1,2,3,4,6,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3]
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm7
; SSE-NEXT: psrld $16, %xmm7
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm7[2],xmm4[3],xmm7[3]
; SSE-NEXT: pand %xmm6, %xmm4
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm5[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm5[0,3,2,3]
; SSE-NEXT: movdqa 32(%rdi), %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,65535,65535,0,65535,65535,65535,65535]
; SSE-NEXT: movdqa %xmm4, %xmm9
; SSE-NEXT: pandn %xmm3, %xmm9
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm3[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm3[0,3,2,3]
; SSE-NEXT: pslld $16, %xmm3
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,6,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm1[2],xmm6[3],xmm1[3]
; SSE-NEXT: pand %xmm4, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $16, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: por %xmm9, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: movdqa %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm1[0,0]
@ -192,31 +197,29 @@ define void @vf4(<24 x i16>* %in.vec, <4 x i16>* %out.vec0, <4 x i16>* %out.vec1
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm5[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,2,3,4,5,6,7]
; SSE-NEXT: pand %xmm6, %xmm7
; SSE-NEXT: pand %xmm4, %xmm7
; SSE-NEXT: por %xmm2, %xmm7
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm3[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm6, %xmm3
; SSE-NEXT: pandn %xmm8, %xmm6
; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[1,1,1,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pandn %xmm8, %xmm4
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm9[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm10[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1]
; SSE-NEXT: psrlq $48, %xmm1
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[1,3,2,3,4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[1,3,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movq %xmm2, (%rsi)
; SSE-NEXT: movq %xmm4, (%rdx)
; SSE-NEXT: movq %xmm6, (%rsi)
; SSE-NEXT: movq %xmm3, (%rdx)
; SSE-NEXT: movq %xmm7, (%rcx)
; SSE-NEXT: movq %xmm6, (%r8)
; SSE-NEXT: movq %xmm4, (%r8)
; SSE-NEXT: movq %xmm5, (%r9)
; SSE-NEXT: movq %xmm0, (%rax)
; SSE-NEXT: retq

@ -85,14 +85,14 @@ define void @load_i32_stride3_vf4(<12 x i32>* %in.vec, <4 x i32>* %out.vec0, <4
; SSE-NEXT: movaps 16(%rdi), %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: movaps %xmm2, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,1],xmm1[3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm1[0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[1,1,1,1]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,3],xmm2[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm1[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm2[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm4[2,0]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[0,3]
; SSE-NEXT: movaps %xmm3, (%rsi)
@ -173,36 +173,36 @@ define void @load_i32_stride3_vf8(<24 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
; SSE-NEXT: movaps 80(%rdi), %xmm8
; SSE-NEXT: movaps 64(%rdi), %xmm3
; SSE-NEXT: movdqa (%rdi), %xmm1
; SSE-NEXT: movaps 16(%rdi), %xmm6
; SSE-NEXT: movaps 16(%rdi), %xmm5
; SSE-NEXT: movaps 32(%rdi), %xmm10
; SSE-NEXT: movdqa 48(%rdi), %xmm2
; SSE-NEXT: movdqa %xmm1, %xmm11
; SSE-NEXT: movaps %xmm6, %xmm7
; SSE-NEXT: movaps %xmm10, %xmm7
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,1],xmm5[3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm6[0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm6[1,1,1,1]
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[2,0],xmm10[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,3],xmm6[0,2]
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm5[0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm5[1,1,1,1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,3],xmm10[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,3],xmm5[0,2]
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: movaps %xmm8, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,1],xmm3[3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm3[0,0]
; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm3[1,1,1,1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm8[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,3],xmm3[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm8[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,1],xmm10[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,3],xmm8[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,3],xmm3[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm4[2,0]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm7[2,0]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm9[0],xmm0[1],xmm9[1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[0,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm8[0,3]
; SSE-NEXT: movaps %xmm6, 16(%rsi)
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1]
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm8[0,3]
; SSE-NEXT: movaps %xmm5, 16(%rsi)
; SSE-NEXT: movaps %xmm11, (%rsi)
; SSE-NEXT: movaps %xmm2, 16(%rdx)
; SSE-NEXT: movaps %xmm1, (%rdx)
; SSE-NEXT: movaps %xmm5, 16(%rcx)
; SSE-NEXT: movaps %xmm6, 16(%rcx)
; SSE-NEXT: movaps %xmm0, (%rcx)
; SSE-NEXT: retq
;
@ -358,95 +358,95 @@ define void @load_i32_stride3_vf8(<24 x i32>* %in.vec, <8 x i32>* %out.vec0, <8
define void @load_i32_stride3_vf16(<48 x i32>* %in.vec, <16 x i32>* %out.vec0, <16 x i32>* %out.vec1, <16 x i32>* %out.vec2) nounwind {
; SSE-LABEL: load_i32_stride3_vf16:
; SSE: # %bb.0:
; SSE-NEXT: movaps 96(%rdi), %xmm14
; SSE-NEXT: movaps 128(%rdi), %xmm11
; SSE-NEXT: movaps 112(%rdi), %xmm12
; SSE-NEXT: movaps 144(%rdi), %xmm3
; SSE-NEXT: movaps 176(%rdi), %xmm13
; SSE-NEXT: movaps 96(%rdi), %xmm10
; SSE-NEXT: movaps 128(%rdi), %xmm6
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 112(%rdi), %xmm13
; SSE-NEXT: movaps 144(%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 176(%rdi), %xmm7
; SSE-NEXT: movaps 160(%rdi), %xmm5
; SSE-NEXT: movaps (%rdi), %xmm15
; SSE-NEXT: movaps 16(%rdi), %xmm8
; SSE-NEXT: movaps 32(%rdi), %xmm6
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 48(%rdi), %xmm10
; SSE-NEXT: movaps 80(%rdi), %xmm9
; SSE-NEXT: movaps 64(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[1,0]
; SSE-NEXT: movaps %xmm10, %xmm4
; SSE-NEXT: movaps 16(%rdi), %xmm9
; SSE-NEXT: movaps 32(%rdi), %xmm8
; SSE-NEXT: movaps 48(%rdi), %xmm11
; SSE-NEXT: movaps 80(%rdi), %xmm12
; SSE-NEXT: movaps 64(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm12[1,1]
; SSE-NEXT: movaps %xmm11, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm13[1,0]
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm7[1,1]
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm9, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm8[1,1]
; SSE-NEXT: movaps %xmm15, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm13, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm6[1,1]
; SSE-NEXT: movaps %xmm10, %xmm14
; SSE-NEXT: movaps %xmm10, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm12, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm11[1,0]
; SSE-NEXT: movaps %xmm14, %xmm1
; SSE-NEXT: movaps %xmm14, %xmm7
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm10, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm2[0,0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm9[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[0,2]
; SSE-NEXT: movaps %xmm3, %xmm4
; SSE-NEXT: movaps %xmm3, %xmm14
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm5[0,0]
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm13[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[0,2]
; SSE-NEXT: movaps %xmm7, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm12[0,0]
; SSE-NEXT: movaps %xmm12, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm11[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm0[0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm8[0,0]
; SSE-NEXT: movaps %xmm8, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm3[3,3]
; SSE-NEXT: movaps %xmm11, %xmm10
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm3[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[2,0]
; SSE-NEXT: movaps %xmm7, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm5[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm7[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,2],xmm3[0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm10[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm9[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm14[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm13[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm7[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm12[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
; SSE-NEXT: # xmm7 = mem[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm11[0,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: movaps %xmm3, 16(%rsi)
; SSE-NEXT: movaps %xmm1, 32(%rdx)
; SSE-NEXT: movaps %xmm7, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm5[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm0[2,0]
; SSE-NEXT: movaps %xmm6, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm13[3,3]
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movaps %xmm1, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm13[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[2,0]
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm9[3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm9[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[2,0]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm12[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; SSE-NEXT: # xmm5 = xmm5[0,1],mem[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm8[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = xmm6[0,1],mem[0,3]
; SSE-NEXT: movaps %xmm14, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%rsi)
; SSE-NEXT: movaps %xmm2, 32(%rdx)
; SSE-NEXT: movaps %xmm15, (%rdx)
; SSE-NEXT: movaps %xmm4, 48(%rdx)
; SSE-NEXT: movaps %xmm6, 16(%rdx)
; SSE-NEXT: movaps %xmm7, 32(%rcx)
; SSE-NEXT: movaps %xmm0, (%rcx)
; SSE-NEXT: movaps %xmm10, 16(%rdx)
; SSE-NEXT: movaps %xmm6, 32(%rcx)
; SSE-NEXT: movaps %xmm1, (%rcx)
; SSE-NEXT: movaps %xmm5, 48(%rcx)
; SSE-NEXT: movaps %xmm2, 16(%rcx)
; SSE-NEXT: movaps %xmm3, 16(%rcx)
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i32_stride3_vf16:
@ -690,144 +690,150 @@ define void @load_i32_stride3_vf16(<48 x i32>* %in.vec, <16 x i32>* %out.vec0, <
define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <32 x i32>* %out.vec1, <32 x i32>* %out.vec2) nounwind {
; SSE-LABEL: load_i32_stride3_vf32:
; SSE: # %bb.0:
; SSE-NEXT: subq $344, %rsp # imm = 0x158
; SSE-NEXT: movaps 336(%rdi), %xmm1
; SSE-NEXT: movaps 368(%rdi), %xmm9
; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 352(%rdi), %xmm14
; SSE-NEXT: subq $360, %rsp # imm = 0x168
; SSE-NEXT: movaps 336(%rdi), %xmm11
; SSE-NEXT: movaps 368(%rdi), %xmm14
; SSE-NEXT: movaps %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 240(%rdi), %xmm15
; SSE-NEXT: movaps 272(%rdi), %xmm13
; SSE-NEXT: movaps 256(%rdi), %xmm7
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 144(%rdi), %xmm3
; SSE-NEXT: movaps 176(%rdi), %xmm12
; SSE-NEXT: movaps 160(%rdi), %xmm10
; SSE-NEXT: movaps 48(%rdi), %xmm5
; SSE-NEXT: movaps 80(%rdi), %xmm6
; SSE-NEXT: movaps 64(%rdi), %xmm8
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[1,0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm2
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm10, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm12[1,0]
; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm7, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm13[1,0]
; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm15, %xmm2
; SSE-NEXT: movaps %xmm15, %xmm4
; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm14, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm9[1,0]
; SSE-NEXT: movaps 352(%rdi), %xmm13
; SSE-NEXT: movaps 240(%rdi), %xmm12
; SSE-NEXT: movaps 272(%rdi), %xmm6
; SSE-NEXT: movaps 256(%rdi), %xmm1
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm2
; SSE-NEXT: movaps 144(%rdi), %xmm10
; SSE-NEXT: movaps 176(%rdi), %xmm3
; SSE-NEXT: movaps 160(%rdi), %xmm7
; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 48(%rdi), %xmm5
; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 80(%rdi), %xmm8
; SSE-NEXT: movaps 64(%rdi), %xmm9
; SSE-NEXT: movaps %xmm9, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm8[1,1]
; SSE-NEXT: movaps %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 16(%rdi), %xmm7
; SSE-NEXT: movaps 32(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm7, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
; SSE-NEXT: movaps %xmm3, %xmm5
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm10, %xmm2
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm6[1,1]
; SSE-NEXT: movaps %xmm6, %xmm15
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm12, %xmm2
; SSE-NEXT: movaps %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm13, %xmm2
; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm13, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm14[1,1]
; SSE-NEXT: movaps %xmm11, %xmm1
; SSE-NEXT: movaps %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm11, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 16(%rdi), %xmm4
; SSE-NEXT: movaps 32(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm4, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
; SSE-NEXT: movaps (%rdi), %xmm11
; SSE-NEXT: movaps %xmm11, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 128(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill
; SSE-NEXT: movaps 112(%rdi), %xmm14
; SSE-NEXT: movaps %xmm14, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
; SSE-NEXT: movaps 96(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 224(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 208(%rdi), %xmm9
; SSE-NEXT: movaps %xmm9, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[1,0]
; SSE-NEXT: movaps 192(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 320(%rdi), %xmm15
; SSE-NEXT: movaps %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm11, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 128(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 112(%rdi), %xmm7
; SSE-NEXT: movaps %xmm7, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
; SSE-NEXT: movaps 96(%rdi), %xmm6
; SSE-NEXT: movaps %xmm6, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 224(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 208(%rdi), %xmm13
; SSE-NEXT: movaps %xmm13, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
; SSE-NEXT: movaps 192(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps 320(%rdi), %xmm3
; SSE-NEXT: movaps 304(%rdi), %xmm0
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm15[1,0]
; SSE-NEXT: movaps 288(%rdi), %xmm2
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm2
; SSE-NEXT: movaps %xmm8, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm8[0,0]
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm6[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm0[0,2]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm10[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm12[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm10[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,3],xmm3[1,1]
; SSE-NEXT: movaps %xmm3, %xmm14
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm4, %xmm15
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm0[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm13[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[0,2]
; SSE-NEXT: movaps %xmm1, %xmm12
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[1,0],xmm0[0,0]
; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,1],mem[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[0,2]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, %xmm10
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm14[0,0]
; SSE-NEXT: movaps %xmm14, %xmm0
; SSE-NEXT: movaps (%rsp), %xmm8 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm8[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[0,2]
; SSE-NEXT: movaps 288(%rdi), %xmm3
; SSE-NEXT: movaps %xmm3, (%rsp) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,3],xmm0[0,2]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,1],xmm9[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm9[0,0]
; SSE-NEXT: movaps %xmm9, %xmm0
; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[3,1],mem[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm0[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm8[2,0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm5[3,3]
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm5[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[2,0]
; SSE-NEXT: movaps %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movaps %xmm15, %xmm0
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm3[3,3]
; SSE-NEXT: movaps %xmm12, %xmm15
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[1,0],xmm3[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,2],xmm0[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm2[3,3]
; SSE-NEXT: movaps %xmm1, %xmm12
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[1,0],xmm2[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,2],xmm0[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: movaps %xmm8, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm7[3,3]
; SSE-NEXT: movaps %xmm6, %xmm1
; SSE-NEXT: movaps %xmm6, %xmm10
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[1,0],xmm7[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2],xmm0[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,1],xmm13[3,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[1,0],xmm13[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm0[2,0]
; SSE-NEXT: movaps %xmm14, %xmm2
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,1],xmm5[3,3]
; SSE-NEXT: movaps (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,0],xmm5[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,2],xmm2[2,0]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
; SSE-NEXT: shufps $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = xmm2[3,1],mem[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm11[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm7[0,0]
; SSE-NEXT: movaps %xmm7, %xmm4
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm13[2,3]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm4[0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[1,1,1,1]
; SSE-NEXT: movaps %xmm2, %xmm14
; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[2,1],xmm4[3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm11[2,3,2,3]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm4[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[0,2],xmm14[2,0]
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Folded Reload
; SSE-NEXT: # xmm14 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; SSE-NEXT: # xmm5 = mem[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1]
; SSE-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm14[0],xmm5[1],xmm14[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
; SSE-NEXT: # xmm5 = xmm5[0,1],mem[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,1],xmm13[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm4[0],xmm9[1],xmm4[1]
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm2[0,3]
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
@ -835,8 +841,8 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
; SSE-NEXT: punpckldq {{.*#+}} xmm13 = xmm13[0],xmm4[0],xmm13[1],xmm4[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Folded Reload
; SSE-NEXT: # xmm13 = xmm13[0,1],mem[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm14[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm6[2,3,2,3]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm7[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm1[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm14 = xmm14[0],xmm4[0],xmm14[1],xmm4[1]
; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm8[0,3]
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
@ -846,12 +852,6 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = xmm4[0,1],mem[0,3]
; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm9[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
; SSE-NEXT: # xmm9 = mem[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm9 = xmm9[0],xmm8[0],xmm9[1],xmm8[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
; SSE-NEXT: # xmm9 = xmm9[0,1],mem[0,3]
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: # xmm8 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
@ -861,45 +861,52 @@ define void @load_i32_stride3_vf32(<96 x i32>* %in.vec, <32 x i32>* %out.vec0, <
; SSE-NEXT: # xmm7 = xmm7[0,1],mem[0,3]
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: # xmm8 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm8[0],xmm1[1],xmm8[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[0,1],mem[0,3]
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Folded Reload
; SSE-NEXT: # xmm8 = mem[1,1,1,1]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,2,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm8[0],xmm0[1],xmm8[1]
; SSE-NEXT: shufps $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,3]
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 96(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 64(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 112(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 80(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: movaps %xmm6, 16(%rsi)
; SSE-NEXT: movaps %xmm1, 96(%rdx)
; SSE-NEXT: movaps %xmm3, 64(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 96(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 64(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 32(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, (%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 112(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 80(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 48(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 16(%rsi)
; SSE-NEXT: movaps %xmm3, 96(%rdx)
; SSE-NEXT: movaps %xmm6, 64(%rdx)
; SSE-NEXT: movaps %xmm10, 32(%rdx)
; SSE-NEXT: movaps %xmm11, (%rdx)
; SSE-NEXT: movaps %xmm12, 112(%rdx)
; SSE-NEXT: movaps %xmm15, 80(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 48(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: movaps %xmm1, 16(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 48(%rdx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: movaps %xmm2, 16(%rdx)
; SSE-NEXT: movaps %xmm0, 96(%rcx)
; SSE-NEXT: movaps %xmm7, 112(%rcx)
; SSE-NEXT: movaps %xmm9, 64(%rcx)
; SSE-NEXT: movaps %xmm1, 112(%rcx)
; SSE-NEXT: movaps %xmm7, 64(%rcx)
; SSE-NEXT: movaps %xmm4, 80(%rcx)
; SSE-NEXT: movaps %xmm14, 32(%rcx)
; SSE-NEXT: movaps %xmm13, 48(%rcx)
; SSE-NEXT: movaps %xmm2, (%rcx)
; SSE-NEXT: movaps %xmm9, (%rcx)
; SSE-NEXT: movaps %xmm5, 16(%rcx)
; SSE-NEXT: addq $344, %rsp # imm = 0x158
; SSE-NEXT: addq $360, %rsp # imm = 0x168
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i32_stride3_vf32:

@ -103,17 +103,20 @@ define void @load_i64_stride3_vf4(<12 x i64>* %in.vec, <4 x i64>* %out.vec0, <4
; AVX1: # %bb.0:
; AVX1-NEXT: vmovapd 32(%rdi), %ymm0
; AVX1-NEXT: vmovapd (%rdi), %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX1-NEXT: vmovaps 16(%rdi), %xmm3
; AVX1-NEXT: vinsertf128 $1, 64(%rdi), %ymm3, %ymm3
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3]
; AVX1-NEXT: vmovapd 16(%rdi), %xmm2
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm2[1],ymm3[2,3]
; AVX1-NEXT: vmovaps 64(%rdi), %xmm4
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm5
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm4
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm4[3]
; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm5
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3]
; AVX1-NEXT: vmovapd %ymm2, (%rsi)
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3]
; AVX1-NEXT: vmovapd %ymm3, (%rsi)
; AVX1-NEXT: vmovapd %ymm1, (%rdx)
; AVX1-NEXT: vmovapd %ymm0, (%rcx)
; AVX1-NEXT: vzeroupper
@ -220,28 +223,34 @@ define void @load_i64_stride3_vf8(<24 x i64>* %in.vec, <8 x i64>* %out.vec0, <8
; AVX1-NEXT: vmovapd (%rdi), %ymm1
; AVX1-NEXT: vmovapd 128(%rdi), %ymm2
; AVX1-NEXT: vmovapd 96(%rdi), %ymm3
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm3[0,1],ymm2[2],ymm3[3]
; AVX1-NEXT: vmovaps 112(%rdi), %xmm5
; AVX1-NEXT: vinsertf128 $1, 160(%rdi), %ymm5, %ymm5
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm5[1],ymm4[2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX1-NEXT: vmovaps 16(%rdi), %xmm7
; AVX1-NEXT: vinsertf128 $1, 64(%rdi), %ymm7, %ymm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm7[1],ymm6[2],ymm7[3]
; AVX1-NEXT: vmovapd 112(%rdi), %xmm4
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm3[0,1],ymm2[2],ymm3[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm4[1],ymm5[2,3]
; AVX1-NEXT: vmovaps 160(%rdi), %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm5[0,1,2],ymm7[3]
; AVX1-NEXT: vmovapd 16(%rdi), %xmm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0],ymm7[1],ymm8[2,3]
; AVX1-NEXT: vmovaps 64(%rdi), %xmm5
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm10
; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm1 = ymm1[1],ymm0[0],ymm1[3],ymm0[2]
; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm8
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm8[3]
; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm10
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1,2],ymm10[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm3 = ymm3[1],ymm2[0],ymm3[3],ymm2[2]
; AVX1-NEXT: vbroadcastsd 176(%rdi), %ymm8
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm8[3]
; AVX1-NEXT: vbroadcastsd 176(%rdi), %ymm10
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm10[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm7[0],ymm0[1],ymm7[2],ymm0[3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm5[0],ymm0[1],ymm5[2],ymm0[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm5[0],ymm2[1],ymm5[2],ymm2[3]
; AVX1-NEXT: vmovapd %ymm6, (%rsi)
; AVX1-NEXT: vmovapd %ymm4, 32(%rsi)
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3]
; AVX1-NEXT: vmovapd %ymm8, (%rsi)
; AVX1-NEXT: vmovapd %ymm9, 32(%rsi)
; AVX1-NEXT: vmovapd %ymm3, 32(%rdx)
; AVX1-NEXT: vmovapd %ymm1, (%rdx)
; AVX1-NEXT: vmovapd %ymm2, 32(%rcx)
@ -432,66 +441,92 @@ define void @load_i64_stride3_vf16(<48 x i64>* %in.vec, <16 x i64>* %out.vec0, <
;
; AVX1-LABEL: load_i64_stride3_vf16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovapd 224(%rdi), %ymm0
; AVX1-NEXT: vmovapd 192(%rdi), %ymm1
; AVX1-NEXT: vmovapd 320(%rdi), %ymm2
; AVX1-NEXT: vmovapd 288(%rdi), %ymm3
; AVX1-NEXT: vmovapd 32(%rdi), %ymm4
; AVX1-NEXT: vmovapd (%rdi), %ymm5
; AVX1-NEXT: vmovapd 128(%rdi), %ymm6
; AVX1-NEXT: vmovapd 96(%rdi), %ymm8
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm8[0,1],ymm6[2],ymm8[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm1[0,1],ymm0[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[1],ymm0[0],ymm9[3],ymm0[2]
; AVX1-NEXT: vbroadcastsd 272(%rdi), %ymm10
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm10[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1],ymm6[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm8 = ymm8[1],ymm6[0],ymm8[3],ymm6[2]
; AVX1-NEXT: vbroadcastsd 176(%rdi), %ymm10
; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm8[0,1,2],ymm10[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm10 = ymm5[0,1],ymm4[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm10 = ymm10[1],ymm4[0],ymm10[3],ymm4[2]
; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm11
; AVX1-NEXT: vblendpd {{.*#+}} ymm10 = ymm10[0,1,2],ymm11[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm11 = ymm3[0,1],ymm2[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm11 = ymm11[1],ymm2[0],ymm11[3],ymm2[2]
; AVX1-NEXT: vbroadcastsd 368(%rdi), %ymm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm11 = ymm11[0,1,2],ymm12[3]
; AVX1-NEXT: vmovaps 112(%rdi), %xmm12
; AVX1-NEXT: vinsertf128 $1, 160(%rdi), %ymm12, %ymm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm12[1],ymm7[2],ymm12[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm4[2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm12[0],ymm6[1],ymm12[2],ymm6[3]
; AVX1-NEXT: vmovaps 16(%rdi), %xmm12
; AVX1-NEXT: vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0],ymm12[1],ymm5[2],ymm12[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2],ymm3[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm12[0],ymm4[1],ymm12[2],ymm4[3]
; AVX1-NEXT: vmovaps 304(%rdi), %xmm12
; AVX1-NEXT: vinsertf128 $1, 352(%rdi), %ymm12, %ymm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm12[1],ymm3[2],ymm12[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX1-NEXT: subq $72, %rsp
; AVX1-NEXT: vmovapd 320(%rdi), %ymm5
; AVX1-NEXT: vmovapd 288(%rdi), %ymm12
; AVX1-NEXT: vmovupd %ymm12, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovapd 32(%rdi), %ymm2
; AVX1-NEXT: vmovapd (%rdi), %ymm9
; AVX1-NEXT: vmovapd 128(%rdi), %ymm3
; AVX1-NEXT: vmovapd 96(%rdi), %ymm14
; AVX1-NEXT: vmovapd 112(%rdi), %xmm0
; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm14[0,1],ymm3[2],ymm14[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm0[1],ymm1[2,3]
; AVX1-NEXT: vmovaps 160(%rdi), %xmm10
; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm0, %ymm4
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm4[3]
; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovapd 16(%rdi), %xmm8
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm9[0,1],ymm2[2],ymm9[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm8[1],ymm1[2,3]
; AVX1-NEXT: vmovaps 64(%rdi), %xmm7
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm6
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm6[3]
; AVX1-NEXT: vmovupd %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vmovapd 304(%rdi), %xmm11
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm12[0,1],ymm5[2],ymm12[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm11[1],ymm1[2,3]
; AVX1-NEXT: vmovaps 352(%rdi), %xmm6
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm0, %ymm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1,2],ymm12[3]
; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovapd 224(%rdi), %ymm13
; AVX1-NEXT: vmovapd 192(%rdi), %ymm0
; AVX1-NEXT: vmovapd 208(%rdi), %xmm4
; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm13[2],ymm0[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0],ymm4[1],ymm12[2,3]
; AVX1-NEXT: vmovaps 256(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0,1,2],ymm15[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm13[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[1],ymm13[0],ymm0[3],ymm13[2]
; AVX1-NEXT: vbroadcastsd 272(%rdi), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1,2],ymm15[3]
; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm3[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm14 = ymm14[1],ymm3[0],ymm14[3],ymm3[2]
; AVX1-NEXT: vbroadcastsd 176(%rdi), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0,1,2],ymm15[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm2[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm9 = ymm9[1],ymm2[0],ymm9[3],ymm2[2]
; AVX1-NEXT: vbroadcastsd 80(%rdi), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1,2],ymm15[3]
; AVX1-NEXT: vblendpd $3, {{[-0-9]+}}(%r{{[sb]}}p), %ymm5, %ymm15 # 32-byte Folded Reload
; AVX1-NEXT: # ymm15 = mem[0,1],ymm5[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm15 = ymm15[1],ymm5[0],ymm15[3],ymm5[2]
; AVX1-NEXT: vbroadcastsd 368(%rdi), %ymm0
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm15[0,1,2],ymm0[3]
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Reload
; AVX1-NEXT: vinsertf128 $1, %xmm10, %ymm15, %ymm10
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm10[0],ymm3[1],ymm10[2],ymm3[3]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm8, %ymm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm12[0],ymm2[1],ymm12[2],ymm2[3]
; AVX1-NEXT: vmovaps 208(%rdi), %xmm12
; AVX1-NEXT: vinsertf128 $1, 256(%rdi), %ymm12, %ymm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm12[1],ymm1[2],ymm12[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm12[0],ymm0[1],ymm12[2],ymm0[3]
; AVX1-NEXT: vmovapd %ymm1, 64(%rsi)
; AVX1-NEXT: vmovapd %ymm3, 96(%rsi)
; AVX1-NEXT: vmovapd %ymm5, (%rsi)
; AVX1-NEXT: vmovapd %ymm7, 32(%rsi)
; AVX1-NEXT: vmovapd %ymm11, 96(%rdx)
; AVX1-NEXT: vmovapd %ymm10, (%rdx)
; AVX1-NEXT: vmovapd %ymm8, 32(%rdx)
; AVX1-NEXT: vmovapd %ymm9, 64(%rdx)
; AVX1-NEXT: vmovapd %ymm0, 64(%rcx)
; AVX1-NEXT: vmovapd %ymm2, 96(%rcx)
; AVX1-NEXT: vmovapd %ymm4, (%rcx)
; AVX1-NEXT: vmovapd %ymm6, 32(%rcx)
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm7[0],ymm2[1],ymm7[2],ymm2[3]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm11, %ymm6
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm6[0],ymm5[1],ymm6[2],ymm5[3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm13[0,1],mem[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2],ymm4[3]
; AVX1-NEXT: vmovapd %ymm12, 64(%rsi)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm4, 96(%rsi)
; AVX1-NEXT: vmovups (%rsp), %ymm4 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm4, (%rsi)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm4, 32(%rsi)
; AVX1-NEXT: vmovapd %ymm0, 96(%rdx)
; AVX1-NEXT: vmovapd %ymm9, (%rdx)
; AVX1-NEXT: vmovapd %ymm14, 32(%rdx)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 64(%rdx)
; AVX1-NEXT: vmovapd %ymm1, 64(%rcx)
; AVX1-NEXT: vmovapd %ymm5, 96(%rcx)
; AVX1-NEXT: vmovapd %ymm2, (%rcx)
; AVX1-NEXT: vmovapd %ymm3, 32(%rcx)
; AVX1-NEXT: addq $72, %rsp
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;


@ -249,10 +249,9 @@ define void @load_i64_stride6_vf4(<24 x i64>* %in.vec, <4 x i64>* %out.vec0, <4
; AVX512-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0,1,2,3,4,5],ymm4[6,7]
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <10,0,6,u>
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512-NEXT: vmovdqa 160(%rdi), %xmm6
; AVX512-NEXT: vpbroadcastq %xmm6, %ymm7
; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm7[6,7]
; AVX512-NEXT: vinserti128 $1, %xmm6, %ymm0, %ymm6
; AVX512-NEXT: vpbroadcastq 160(%rdi), %ymm6
; AVX512-NEXT: vpblendd {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],ymm6[6,7]
; AVX512-NEXT: vinserti128 $1, 160(%rdi), %ymm0, %ymm6
; AVX512-NEXT: vmovdqa {{.*#+}} ymm7 = <11,1,7,u>
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm7
; AVX512-NEXT: vpblendd {{.*#+}} ymm6 = ymm7[0,1,2,3,4,5],ymm6[6,7]

File diff suppressed because it is too large


@ -63,15 +63,22 @@ define void @vf4(<4 x i16>* %in.vecptr0, <4 x i16>* %in.vecptr1, <4 x i16>* %in.
; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: movdqa %xmm0, 16(%r8)
; SSE-NEXT: movdqa %xmm2, (%r8)
; SSE-NEXT: movq {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm1[0]
; SSE-NEXT: movdqa %xmm2, %xmm5
; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm3[0]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,0,3,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[3,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,0,3,1,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE-NEXT: movdqa %xmm0, (%r8)
; SSE-NEXT: movdqa %xmm4, 16(%r8)
; SSE-NEXT: retq
;
; AVX1-LABEL: vf4:
@ -317,107 +324,107 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
;
; AVX1-LABEL: vf16:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rcx), %xmm8
; AVX1-NEXT: vmovdqa 16(%rcx), %xmm5
; AVX1-NEXT: vmovdqa (%rdx), %xmm9
; AVX1-NEXT: vmovdqa 16(%rdx), %xmm6
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
; AVX1-NEXT: vmovdqa (%rcx), %xmm5
; AVX1-NEXT: vmovdqa 16(%rcx), %xmm8
; AVX1-NEXT: vmovdqa (%rdx), %xmm6
; AVX1-NEXT: vmovdqa 16(%rdx), %xmm9
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm3
; AVX1-NEXT: vmovdqa (%rsi), %xmm2
; AVX1-NEXT: vmovdqa 16(%rsi), %xmm7
; AVX1-NEXT: vmovdqa (%rdi), %xmm4
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm0
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vmovdqa (%rsi), %xmm7
; AVX1-NEXT: vmovdqa 16(%rsi), %xmm3
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm4
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm10, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm10, %ymm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm10, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm10, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm5, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
; AVX1-NEXT: vmovaps %ymm2, (%r8)
; AVX1-NEXT: vmovaps %ymm0, 96(%r8)
; AVX1-NEXT: vmovaps %ymm1, 64(%r8)
; AVX1-NEXT: vmovaps %ymm11, 32(%r8)
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX1-NEXT: vmovaps %ymm2, 96(%r8)
; AVX1-NEXT: vmovaps %ymm0, (%r8)
; AVX1-NEXT: vmovaps %ymm1, 32(%r8)
; AVX1-NEXT: vmovaps %ymm11, 64(%r8)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: vf16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rcx), %xmm8
; AVX2-NEXT: vmovdqa 16(%rcx), %xmm5
; AVX2-NEXT: vmovdqa (%rdx), %xmm9
; AVX2-NEXT: vmovdqa 16(%rdx), %xmm6
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
; AVX2-NEXT: vmovdqa (%rcx), %xmm5
; AVX2-NEXT: vmovdqa 16(%rcx), %xmm8
; AVX2-NEXT: vmovdqa (%rdx), %xmm6
; AVX2-NEXT: vmovdqa 16(%rdx), %xmm9
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm3
; AVX2-NEXT: vmovdqa (%rsi), %xmm2
; AVX2-NEXT: vmovdqa 16(%rsi), %xmm7
; AVX2-NEXT: vmovdqa (%rdi), %xmm4
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm0
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX2-NEXT: vmovdqa (%rsi), %xmm7
; AVX2-NEXT: vmovdqa 16(%rsi), %xmm3
; AVX2-NEXT: vmovdqa (%rdi), %xmm0
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm4
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm10, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm10 = xmm1[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm10, %ymm1
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm3[0],zero,xmm3[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm10, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm7[4],xmm0[5],xmm7[5],xmm0[6],xmm7[6],xmm0[7],xmm7[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm10, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1],xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm5, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm3[1],ymm0[2],ymm3[3],ymm0[4],ymm3[5],ymm0[6],ymm3[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2],ymm3[3],ymm2[4],ymm3[5],ymm2[6],ymm3[7]
; AVX2-NEXT: vmovdqa %ymm2, (%r8)
; AVX2-NEXT: vmovdqa %ymm0, 96(%r8)
; AVX2-NEXT: vmovdqa %ymm1, 64(%r8)
; AVX2-NEXT: vmovdqa %ymm11, 32(%r8)
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm3[0],ymm2[1],ymm3[2],ymm2[3],ymm3[4],ymm2[5],ymm3[6],ymm2[7]
; AVX2-NEXT: vmovdqa %ymm2, 96(%r8)
; AVX2-NEXT: vmovdqa %ymm0, (%r8)
; AVX2-NEXT: vmovdqa %ymm1, 32(%r8)
; AVX2-NEXT: vmovdqa %ymm11, 64(%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@ -543,207 +550,207 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
;
; AVX1-LABEL: vf32:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rcx), %xmm12
; AVX1-NEXT: vmovdqa 16(%rcx), %xmm15
; AVX1-NEXT: vmovdqa 32(%rcx), %xmm3
; AVX1-NEXT: vmovdqa 48(%rcx), %xmm11
; AVX1-NEXT: vmovdqa (%rdx), %xmm13
; AVX1-NEXT: vmovdqa 16(%rdx), %xmm6
; AVX1-NEXT: vmovdqa 32(%rdx), %xmm7
; AVX1-NEXT: vmovdqa 48(%rdx), %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm5, %ymm8
; AVX1-NEXT: vmovdqa (%rsi), %xmm14
; AVX1-NEXT: vmovdqa 48(%rsi), %xmm2
; AVX1-NEXT: vmovdqa (%rdi), %xmm5
; AVX1-NEXT: vmovdqa 48(%rdi), %xmm4
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; AVX1-NEXT: vmovdqa (%rcx), %xmm15
; AVX1-NEXT: vmovdqa 16(%rcx), %xmm12
; AVX1-NEXT: vmovdqa 32(%rcx), %xmm11
; AVX1-NEXT: vmovdqa 48(%rcx), %xmm2
; AVX1-NEXT: vmovdqa (%rdx), %xmm6
; AVX1-NEXT: vmovdqa 16(%rdx), %xmm13
; AVX1-NEXT: vmovdqa 32(%rdx), %xmm1
; AVX1-NEXT: vmovdqa 48(%rdx), %xmm7
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm8
; AVX1-NEXT: vmovdqa 16(%rsi), %xmm14
; AVX1-NEXT: vmovdqa 32(%rsi), %xmm3
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm5
; AVX1-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm9, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm8, %ymm8
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm10, %ymm0
; AVX1-NEXT: vmovdqa 32(%rsi), %xmm10
; AVX1-NEXT: vmovdqa 48(%rsi), %xmm10
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
; AVX1-NEXT: vmovdqa 32(%rdi), %xmm0
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
; AVX1-NEXT: vmovdqa 48(%rdi), %xmm0
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm8, %ymm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
; AVX1-NEXT: vmovdqa 16(%rsi), %xmm4
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vmovdqa (%rsi), %xmm4
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
; AVX1-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm7, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm6, %ymm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm7, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4],ymm0[5],ymm3[6],ymm0[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm4, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,0,1,1]
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
; AVX1-NEXT: vmovaps %ymm2, (%r8)
; AVX1-NEXT: vmovaps %ymm1, 96(%r8)
; AVX1-NEXT: vmovaps %ymm0, 64(%r8)
; AVX1-NEXT: vmovaps %ymm3, 160(%r8)
; AVX1-NEXT: vmovaps %ymm11, 128(%r8)
; AVX1-NEXT: vmovaps %ymm8, 224(%r8)
; AVX1-NEXT: vmovaps %ymm9, 192(%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
; AVX1-NEXT: vmovaps %ymm3, 96(%r8)
; AVX1-NEXT: vmovaps %ymm1, (%r8)
; AVX1-NEXT: vmovaps %ymm0, 32(%r8)
; AVX1-NEXT: vmovaps %ymm2, 192(%r8)
; AVX1-NEXT: vmovaps %ymm11, 224(%r8)
; AVX1-NEXT: vmovaps %ymm8, 128(%r8)
; AVX1-NEXT: vmovaps %ymm9, 160(%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 64(%r8)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: vf32:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqa (%rcx), %xmm12
; AVX2-NEXT: vmovdqa 16(%rcx), %xmm15
; AVX2-NEXT: vmovdqa 32(%rcx), %xmm3
; AVX2-NEXT: vmovdqa 48(%rcx), %xmm11
; AVX2-NEXT: vmovdqa (%rdx), %xmm13
; AVX2-NEXT: vmovdqa 16(%rdx), %xmm6
; AVX2-NEXT: vmovdqa 32(%rdx), %xmm7
; AVX2-NEXT: vmovdqa 48(%rdx), %xmm1
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm5, %ymm8
; AVX2-NEXT: vmovdqa (%rsi), %xmm14
; AVX2-NEXT: vmovdqa 48(%rsi), %xmm2
; AVX2-NEXT: vmovdqa (%rdi), %xmm5
; AVX2-NEXT: vmovdqa 48(%rdi), %xmm4
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; AVX2-NEXT: vmovdqa (%rcx), %xmm15
; AVX2-NEXT: vmovdqa 16(%rcx), %xmm12
; AVX2-NEXT: vmovdqa 32(%rcx), %xmm11
; AVX2-NEXT: vmovdqa 48(%rcx), %xmm2
; AVX2-NEXT: vmovdqa (%rdx), %xmm6
; AVX2-NEXT: vmovdqa 16(%rdx), %xmm13
; AVX2-NEXT: vmovdqa 32(%rdx), %xmm1
; AVX2-NEXT: vmovdqa 48(%rdx), %xmm7
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm5 = xmm3[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm8
; AVX2-NEXT: vmovdqa 16(%rsi), %xmm14
; AVX2-NEXT: vmovdqa 32(%rsi), %xmm3
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm5
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm4
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm9 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm9, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
; AVX2-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm0[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm8, %ymm8
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm10 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm10, %ymm0
; AVX2-NEXT: vmovdqa 32(%rsi), %xmm10
; AVX2-NEXT: vmovdqa 48(%rsi), %xmm10
; AVX2-NEXT: vpblendd {{.*#+}} ymm9 = ymm0[0],ymm8[1],ymm0[2],ymm8[3],ymm0[4],ymm8[5],ymm0[6],ymm8[7]
; AVX2-NEXT: vmovdqa 32(%rdi), %xmm0
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm11[4],xmm1[5],xmm11[5],xmm1[6],xmm11[6],xmm1[7],xmm11[7]
; AVX2-NEXT: vmovdqa 48(%rdi), %xmm0
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm11[0],xmm1[1],xmm11[1],xmm1[2],xmm11[2],xmm1[3],xmm11[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm8 = xmm1[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm8, %ymm1
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm1[0,0,1,1]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm8 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm2, %ymm1
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX2-NEXT: vmovdqa 16(%rsi), %xmm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm2[0],ymm1[1],ymm2[2],ymm1[3],ymm2[4],ymm1[5],ymm2[6],ymm1[7]
; AVX2-NEXT: vmovdqa 16(%rdi), %xmm1
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX2-NEXT: vmovdqa (%rsi), %xmm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm11 = ymm3[0],ymm1[1],ymm3[2],ymm1[3],ymm3[4],ymm1[5],ymm3[6],ymm1[7]
; AVX2-NEXT: vmovdqa (%rdi), %xmm1
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[0,0,1,1]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm0[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm0
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm2[0],zero,xmm2[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm7, %ymm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm2[0],ymm0[1],ymm2[2],ymm0[3],ymm2[4],ymm0[5],ymm2[6],ymm0[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm6[4],xmm15[4],xmm6[5],xmm15[5],xmm6[6],xmm15[6],xmm6[7],xmm15[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm6, %ymm2
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm3[0],zero,xmm3[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm7, %ymm3
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm3[0],ymm0[1],ymm3[2],ymm0[3],ymm3[4],ymm0[5],ymm3[6],ymm0[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm6[0],xmm15[0],xmm6[1],xmm15[1],xmm6[2],xmm15[2],xmm6[3],xmm15[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm6, %ymm3
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm4, %ymm1
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm2[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm4, %ymm2
; AVX2-NEXT: vpunpcklwd {{.*#+}} xmm4 = xmm5[0],xmm14[0],xmm5[1],xmm14[1],xmm5[2],xmm14[2],xmm5[3],xmm14[3]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm3[1],ymm1[2],ymm3[3],ymm1[4],ymm3[5],ymm1[6],ymm3[7]
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm13[4],xmm12[4],xmm13[5],xmm12[5],xmm13[6],xmm12[6],xmm13[7],xmm12[7]
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm3[0,0,1,1]
; AVX2-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm4, %ymm3
; AVX2-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,2,3,3]
; AVX2-NEXT: vinserti128 $1, %xmm4, %ymm5, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0],ymm2[1],ymm4[2],ymm2[3],ymm4[4],ymm2[5],ymm4[6],ymm2[7]
; AVX2-NEXT: vmovdqa %ymm2, (%r8)
; AVX2-NEXT: vmovdqa %ymm1, 96(%r8)
; AVX2-NEXT: vmovdqa %ymm0, 64(%r8)
; AVX2-NEXT: vmovdqa %ymm3, 160(%r8)
; AVX2-NEXT: vmovdqa %ymm11, 128(%r8)
; AVX2-NEXT: vmovdqa %ymm8, 224(%r8)
; AVX2-NEXT: vmovdqa %ymm9, 192(%r8)
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3],ymm4[4],ymm3[5],ymm4[6],ymm3[7]
; AVX2-NEXT: vmovdqa %ymm3, 96(%r8)
; AVX2-NEXT: vmovdqa %ymm1, (%r8)
; AVX2-NEXT: vmovdqa %ymm0, 32(%r8)
; AVX2-NEXT: vmovdqa %ymm2, 192(%r8)
; AVX2-NEXT: vmovdqa %ymm11, 224(%r8)
; AVX2-NEXT: vmovdqa %ymm8, 128(%r8)
; AVX2-NEXT: vmovdqa %ymm9, 160(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-NEXT: vmovaps %ymm0, 64(%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -13,14 +13,14 @@ define void @store_i32_stride4_vf2(<2 x i32>* %in.vecptr0, <2 x i32>* %in.vecptr
; SSE: # %bb.0:
; SSE-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: unpcklps {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm1, 16(%r8)
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT: movaps %xmm0, 16(%r8)
; SSE-NEXT: movaps %xmm2, (%r8)
; SSE-NEXT: retq
;
@ -469,176 +469,176 @@ define void @store_i32_stride4_vf16(<16 x i32>* %in.vecptr0, <16 x i32>* %in.vec
; AVX1-LABEL: store_i32_stride4_vf16:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $24, %rsp
; AVX1-NEXT: vmovaps 16(%rdi), %xmm7
; AVX1-NEXT: vmovaps 32(%rdi), %xmm6
; AVX1-NEXT: vmovaps 16(%rdi), %xmm2
; AVX1-NEXT: vmovaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps 32(%rdi), %xmm13
; AVX1-NEXT: vmovaps 48(%rdi), %xmm11
; AVX1-NEXT: vmovaps 16(%rsi), %xmm13
; AVX1-NEXT: vmovaps 32(%rsi), %xmm14
; AVX1-NEXT: vmovaps 48(%rsi), %xmm10
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm11[1],xmm10[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm10[0],xmm11[1],xmm10[1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm8
; AVX1-NEXT: vmovaps 16(%rcx), %xmm12
; AVX1-NEXT: vmovaps 16(%rsi), %xmm1
; AVX1-NEXT: vmovaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps 32(%rsi), %xmm8
; AVX1-NEXT: vmovaps 48(%rsi), %xmm9
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm2[1],xmm1[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm10
; AVX1-NEXT: vmovaps 16(%rcx), %xmm0
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps 32(%rcx), %xmm3
; AVX1-NEXT: vmovaps 48(%rcx), %xmm2
; AVX1-NEXT: vmovaps 48(%rcx), %xmm6
; AVX1-NEXT: vmovaps 16(%rdx), %xmm15
; AVX1-NEXT: vmovaps 32(%rdx), %xmm1
; AVX1-NEXT: vmovaps 48(%rdx), %xmm4
; AVX1-NEXT: vunpcklps {{.*#+}} xmm9 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm2[0],xmm4[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0]
; AVX1-NEXT: vinsertf128 $1, %xmm9, %ymm5, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
; AVX1-NEXT: vmovaps 32(%rdx), %xmm4
; AVX1-NEXT: vmovaps 48(%rdx), %xmm5
; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm15[0],xmm0[0],xmm15[1],xmm0[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm15[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm0[2,3],ymm10[4,5],ymm0[6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm6[1],xmm14[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm8 = xmm6[0],xmm14[0],xmm6[1],xmm14[1]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm8, %ymm8
; AVX1-NEXT: vunpcklps {{.*#+}} xmm5 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm3[0],xmm1[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm8[0,1],ymm5[2,3],ymm8[4,5],ymm5[6,7]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm13[1],xmm8[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm13[0],xmm8[0],xmm13[1],xmm8[1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm3[0],xmm4[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm7 = xmm7[0,1,2,0]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm7, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = xmm7[1],xmm13[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm6 = xmm7[0],xmm13[0],xmm7[1],xmm13[1]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm5
; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm12[0],xmm15[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[0,1,2,0]
; AVX1-NEXT: vunpcklps {{.*#+}} xmm7 = xmm15[0],xmm12[0],xmm15[1],xmm12[1]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm6, %ymm6
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm5[0,1],ymm6[2,3],ymm5[4,5],ymm6[6,7]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = xmm11[1],xmm9[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm1 = xmm11[0],xmm9[0],xmm11[1],xmm9[1]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm6[0],xmm5[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; AVX1-NEXT: vunpcklps {{.*#+}} xmm7 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm1, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovaps (%rdi), %xmm8
; AVX1-NEXT: vmovaps (%rsi), %xmm7
; AVX1-NEXT: vinsertps {{.*#+}} xmm6 = xmm8[1],xmm7[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm9
; AVX1-NEXT: vmovaps (%rcx), %xmm6
; AVX1-NEXT: vmovaps (%rdi), %xmm2
; AVX1-NEXT: vmovaps (%rsi), %xmm1
; AVX1-NEXT: vinsertps {{.*#+}} xmm7 = xmm2[1],xmm1[1],zero,zero
; AVX1-NEXT: vunpcklps {{.*#+}} xmm10 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm10, %ymm10
; AVX1-NEXT: vmovaps (%rcx), %xmm7
; AVX1-NEXT: vmovaps (%rdx), %xmm0
; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm6[0],xmm0[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[0,1,2,0]
; AVX1-NEXT: vunpcklps {{.*#+}} xmm13 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
; AVX1-NEXT: vinsertf128 $1, %xmm13, %ymm5, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm5[2,3],ymm9[4,5],ymm5[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm6[2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm8[2],xmm7[2],xmm8[3],xmm7[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm7[3,0],xmm8[3,0]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm0[0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm12 = xmm12[0,1,2,0]
; AVX1-NEXT: vunpcklps {{.*#+}} xmm14 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
; AVX1-NEXT: vinsertf128 $1, %xmm14, %ymm12, %ymm12
; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm12 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm4[2],xmm3[2]
; AVX1-NEXT: vinsertf128 $1, %xmm12, %ymm3, %ymm12
; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm13[2],xmm8[2],xmm13[3],xmm8[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm8[3,0],xmm13[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm12[2,3],ymm3[4,5],ymm12[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm4 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm5 = zero,zero,xmm5[2],xmm6[2]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm5, %ymm4
; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm11[2],xmm9[2],xmm11[3],xmm9[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm6 = xmm9[3,0],xmm11[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm6 = xmm6[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0,1],ymm0[2,3],ymm5[4,5],ymm0[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm0 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm4[2],xmm2[2]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm11[2],xmm10[2],xmm11[3],xmm10[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm10[3,0],xmm11[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1],ymm0[2,3],ymm2[4,5],ymm0[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm1 = zero,zero,xmm1[2],xmm3[2]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm3[2],xmm14[2],xmm3[3],xmm14[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm14[3,0],xmm3[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = xmm3[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm15[2],xmm12[2],xmm15[3],xmm12[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm3 = zero,zero,xmm15[2],xmm12[2]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm3, %ymm2
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm0 = zero,zero,xmm0[2],xmm7[2]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
; AVX1-NEXT: vunpckhps {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[3,0],xmm2[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = xmm1[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm0[2,3],ymm1[4,5],ymm0[6,7]
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vunpckhps {{.*#+}} xmm1 = xmm15[2],xmm2[2],xmm15[3],xmm2[3]
; AVX1-NEXT: vinsertps {{.*#+}} xmm2 = zero,zero,xmm15[2],xmm2[2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-NEXT: vunpckhps {{.*#+}} xmm3 = xmm5[2],xmm4[2],xmm5[3],xmm4[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm4 = xmm4[3,0],xmm5[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm4 = xmm4[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm3, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX1-NEXT: vmovaps %ymm2, 96(%r8)
; AVX1-NEXT: vmovaps %ymm1, 160(%r8)
; AVX1-NEXT: vmovaps %ymm0, 224(%r8)
; AVX1-NEXT: vmovaps %ymm8, 32(%r8)
; AVX1-NEXT: vmovaps %ymm9, (%r8)
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX1-NEXT: vunpckhps {{.*#+}} xmm2 = xmm6[2],xmm5[2],xmm6[3],xmm5[3]
; AVX1-NEXT: vshufps {{.*#+}} xmm5 = xmm5[3,0],xmm6[3,0]
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = xmm5[2,0,2,3]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm2, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX1-NEXT: vmovaps %ymm1, 96(%r8)
; AVX1-NEXT: vmovaps %ymm0, 32(%r8)
; AVX1-NEXT: vmovaps %ymm4, 224(%r8)
; AVX1-NEXT: vmovaps %ymm3, 160(%r8)
; AVX1-NEXT: vmovaps %ymm10, (%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 64(%r8)
; AVX1-NEXT: vmovaps %ymm0, 192(%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 128(%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 192(%r8)
; AVX1-NEXT: vmovaps %ymm0, 64(%r8)
; AVX1-NEXT: addq $24, %rsp
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_i32_stride4_vf16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm9
; AVX2-NEXT: vmovaps (%rcx), %xmm10
; AVX2-NEXT: vmovaps 32(%rcx), %xmm3
; AVX2-NEXT: vmovaps (%rdx), %xmm5
; AVX2-NEXT: vmovaps 32(%rdx), %xmm6
; AVX2-NEXT: vunpckhps {{.*#+}} xmm1 = xmm5[2],xmm10[2],xmm5[3],xmm10[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-NEXT: vmovaps (%rsi), %xmm7
; AVX2-NEXT: vmovaps 32(%rsi), %xmm2
; AVX2-NEXT: vmovaps (%rdi), %xmm0
; AVX2-NEXT: vmovaps 32(%rdi), %xmm4
; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm0[2],xmm7[2],xmm0[3],xmm7[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm8[0,1],ymm1[2,3],ymm8[4,5],ymm1[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm8 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; AVX2-NEXT: vmovaps 32(%rdi), %ymm13
; AVX2-NEXT: vmovaps (%rdi), %ymm11
; AVX2-NEXT: vmovaps 32(%rsi), %ymm14
; AVX2-NEXT: vmovaps (%rsi), %ymm12
; AVX2-NEXT: vmovaps 32(%rdx), %ymm5
; AVX2-NEXT: vmovaps (%rdx), %ymm15
; AVX2-NEXT: vmovaps 32(%rcx), %ymm7
; AVX2-NEXT: vmovaps (%rcx), %xmm6
; AVX2-NEXT: vmovaps 32(%rcx), %xmm0
; AVX2-NEXT: vmovaps (%rdx), %xmm1
; AVX2-NEXT: vmovaps 32(%rdx), %xmm2
; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm11 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm11[0,1],ymm8[2,3],ymm11[4,5],ymm8[6,7]
; AVX2-NEXT: vmovaps 32(%rdi), %ymm11
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm6[2],xmm3[2],xmm6[3],xmm3[3]
; AVX2-NEXT: vmovaps (%rsi), %ymm6
; AVX2-NEXT: vunpckhps {{.*#+}} xmm2 = xmm4[2],xmm2[2],xmm4[3],xmm2[3]
; AVX2-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-NEXT: vmovaps 32(%rsi), %xmm3
; AVX2-NEXT: vmovaps 32(%rdi), %xmm4
; AVX2-NEXT: vunpckhps {{.*#+}} xmm9 = xmm4[2],xmm3[2],xmm4[3],xmm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
; AVX2-NEXT: vmovaps (%rsi), %xmm8
; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; AVX2-NEXT: vmovaps (%rdi), %xmm2
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm3[0,1],ymm0[2,3],ymm3[4,5],ymm0[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm2[2],xmm8[2],xmm2[3],xmm8[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0,1],ymm3[2,3],ymm4[4,5],ymm3[6,7]
; AVX2-NEXT: vmovaps (%rcx), %ymm3
; AVX2-NEXT: vunpcklps {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7]
; AVX2-NEXT: vmovaps 32(%rdx), %ymm2
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm10[0],xmm5[1],xmm10[1]
; AVX2-NEXT: vmovaps 32(%rcx), %ymm10
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm7[0],xmm0[1],xmm7[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm5[2,3],ymm0[4,5],ymm5[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm2[0],ymm10[0],ymm2[1],ymm10[1],ymm2[4],ymm10[4],ymm2[5],ymm10[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm7 = ymm11[0],ymm4[0],ymm11[1],ymm4[1],ymm11[4],ymm4[4],ymm11[5],ymm4[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm7[0,1],ymm5[2,3],ymm7[4,5],ymm5[6,7]
; AVX2-NEXT: vmovaps (%rdx), %ymm7
; AVX2-NEXT: vunpckhps {{.*#+}} ymm2 = ymm2[2],ymm10[2],ymm2[3],ymm10[3],ymm2[6],ymm10[6],ymm2[7],ymm10[7]
; AVX2-NEXT: vmovaps (%rcx), %ymm10
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3],ymm2[4,5],ymm1[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm2 = ymm15[0],ymm3[0],ymm15[1],ymm3[1],ymm15[4],ymm3[4],ymm15[5],ymm3[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,2,3]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm11[2],ymm4[2],ymm11[3],ymm4[3],ymm11[6],ymm4[6],ymm11[7],ymm4[7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm11[0],ymm12[0],ymm11[1],ymm12[1],ymm11[4],ymm12[4],ymm11[5],ymm12[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm4[0,1],ymm2[2,3],ymm4[4,5],ymm2[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm7[0],ymm10[0],ymm7[1],ymm10[1],ymm7[4],ymm10[4],ymm7[5],ymm10[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm4 = ymm5[2],ymm7[2],ymm5[3],ymm7[3],ymm5[6],ymm7[6],ymm5[7],ymm7[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,2,3]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm9[0],ymm6[0],ymm9[1],ymm6[1],ymm9[4],ymm6[4],ymm9[5],ymm6[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm11[0,1],ymm4[2,3],ymm11[4,5],ymm4[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm7 = ymm7[2],ymm10[2],ymm7[3],ymm10[3],ymm7[6],ymm10[6],ymm7[7],ymm10[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm9[2],ymm6[2],ymm9[3],ymm6[3],ymm9[6],ymm6[6],ymm9[7],ymm6[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,2,3]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm13[2],ymm14[2],ymm13[3],ymm14[3],ymm13[6],ymm14[6],ymm13[7],ymm14[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
; AVX2-NEXT: vmovaps %ymm6, 96(%r8)
; AVX2-NEXT: vmovaps %ymm4, 64(%r8)
; AVX2-NEXT: vmovaps %ymm2, 224(%r8)
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3],ymm6[4,5],ymm4[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm5 = ymm5[0],ymm7[0],ymm5[1],ymm7[1],ymm5[4],ymm7[4],ymm5[5],ymm7[5]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm13[0],ymm14[0],ymm13[1],ymm14[1],ymm13[4],ymm14[4],ymm13[5],ymm14[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1],ymm5[2,3],ymm6[4,5],ymm5[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm3 = ymm15[2],ymm3[2],ymm15[3],ymm3[3],ymm15[6],ymm3[6],ymm15[7],ymm3[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm11[2],ymm12[2],ymm11[3],ymm12[3],ymm11[6],ymm12[6],ymm11[7],ymm12[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1],ymm3[2,3],ymm6[4,5],ymm3[6,7]
; AVX2-NEXT: vmovaps %ymm3, 96(%r8)
; AVX2-NEXT: vmovaps %ymm5, 192(%r8)
; AVX2-NEXT: vmovaps %ymm0, (%r8)
; AVX2-NEXT: vmovaps %ymm3, 160(%r8)
; AVX2-NEXT: vmovaps %ymm8, 128(%r8)
; AVX2-NEXT: vmovaps %ymm1, 32(%r8)
; AVX2-NEXT: vmovaps %ymm4, 224(%r8)
; AVX2-NEXT: vmovaps %ymm2, 64(%r8)
; AVX2-NEXT: vmovaps %ymm1, (%r8)
; AVX2-NEXT: vmovaps %ymm9, 32(%r8)
; AVX2-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-NEXT: vmovaps %ymm10, 160(%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@ -1152,49 +1152,49 @@ define void @store_i32_stride4_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
; AVX2-NEXT: vmovaps (%rdx), %xmm14
; AVX2-NEXT: vmovaps 32(%rdx), %xmm12
; AVX2-NEXT: vmovaps 64(%rdx), %xmm3
; AVX2-NEXT: vunpcklps {{.*#+}} xmm8 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm8 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,0,2,1]
; AVX2-NEXT: vmovaps 32(%rsi), %xmm4
; AVX2-NEXT: vmovaps 64(%rsi), %xmm7
; AVX2-NEXT: vmovaps 32(%rdi), %xmm0
; AVX2-NEXT: vmovaps 64(%rdi), %xmm5
; AVX2-NEXT: vunpcklps {{.*#+}} xmm9 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm9 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm9[0,1],ymm8[2,3],ymm9[4,5],ymm8[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm11[2],xmm3[3],xmm11[3]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm11[0],xmm3[1],xmm11[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm5[2],xmm7[2],xmm5[3],xmm7[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm12[2],xmm10[2],xmm12[3],xmm10[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm5 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1],ymm3[2,3],ymm5[4,5],ymm3[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm12[0],xmm10[0],xmm12[1],xmm10[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
; AVX2-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX2-NEXT: vmovaps 96(%rcx), %xmm10
; AVX2-NEXT: vmovaps 96(%rdx), %xmm3
; AVX2-NEXT: vunpcklps {{.*#+}} xmm4 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm4 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm4[0,0,2,1]
; AVX2-NEXT: vmovaps 96(%rsi), %xmm4
; AVX2-NEXT: vmovaps 96(%rdi), %xmm0
; AVX2-NEXT: vunpcklps {{.*#+}} xmm12 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm12 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm12[0,1],ymm8[2,3],ymm12[4,5],ymm8[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovaps (%rsi), %xmm1
; AVX2-NEXT: vunpckhps {{.*#+}} xmm3 = xmm3[2],xmm10[2],xmm3[3],xmm10[3]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm3 = xmm3[0],xmm10[0],xmm3[1],xmm10[1]
; AVX2-NEXT: vmovaps (%rdi), %xmm10
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,0,2,1]
; AVX2-NEXT: vunpckhps {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
; AVX2-NEXT: vunpcklps {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,1,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5],ymm3[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
@ -1219,43 +1219,43 @@ define void @store_i32_stride4_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
; AVX2-NEXT: vunpcklps {{.*#+}} ymm4 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[4],ymm2[4],ymm3[5],ymm2[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm4[0,1],ymm1[2,3],ymm4[4,5],ymm1[6,7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm1 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[4],ymm0[4],ymm6[5],ymm0[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm1 = ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[6],ymm0[6],ymm6[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovaps 64(%rdi), %ymm10
; AVX2-NEXT: vmovaps 64(%rsi), %ymm14
; AVX2-NEXT: vunpcklps {{.*#+}} ymm9 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm9 = ymm10[2],ymm14[2],ymm10[3],ymm14[3],ymm10[6],ymm14[6],ymm10[7],ymm14[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm9[0,1],ymm1[2,3],ymm9[4,5],ymm1[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm6[2],ymm0[2],ymm6[3],ymm0[3],ymm6[6],ymm0[6],ymm6[7],ymm0[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm10[2],ymm14[2],ymm10[3],ymm14[3],ymm10[6],ymm14[6],ymm10[7],ymm14[7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm6[0],ymm0[0],ymm6[1],ymm0[1],ymm6[4],ymm0[4],ymm6[5],ymm0[5]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm10[0],ymm14[0],ymm10[1],ymm14[1],ymm10[4],ymm14[4],ymm10[5],ymm14[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm0[2,3],ymm6[4,5],ymm0[6,7]
; AVX2-NEXT: vmovaps 32(%rdx), %ymm6
; AVX2-NEXT: vmovaps 32(%rcx), %ymm9
; AVX2-NEXT: vunpcklps {{.*#+}} ymm10 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm10 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,2,3]
; AVX2-NEXT: vmovaps 32(%rdi), %ymm14
; AVX2-NEXT: vmovaps 32(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm11 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm11[0,1],ymm10[2,3],ymm11[4,5],ymm10[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm0[0,1],ymm6[2,3],ymm0[4,5],ymm6[6,7]
; AVX2-NEXT: vmovaps 96(%rdx), %ymm6
; AVX2-NEXT: vmovaps 96(%rcx), %ymm9
; AVX2-NEXT: vunpcklps {{.*#+}} ymm11 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm11 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,2,3]
; AVX2-NEXT: vmovaps 96(%rdi), %ymm14
; AVX2-NEXT: vmovaps 96(%rsi), %ymm0
; AVX2-NEXT: vunpcklps {{.*#+}} ymm8 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm8 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm11[2,3],ymm8[4,5],ymm11[6,7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm6 = ymm6[2],ymm9[2],ymm6[3],ymm9[3],ymm6[6],ymm9[6],ymm6[7],ymm9[7]
; AVX2-NEXT: vunpckhps {{.*#+}} ymm0 = ymm14[2],ymm0[2],ymm14[3],ymm0[3],ymm14[6],ymm0[6],ymm14[7],ymm0[7]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm6 = ymm6[0],ymm9[0],ymm6[1],ymm9[1],ymm6[4],ymm9[4],ymm6[5],ymm9[5]
; AVX2-NEXT: vunpcklps {{.*#+}} ymm0 = ymm14[0],ymm0[0],ymm14[1],ymm0[1],ymm14[4],ymm0[4],ymm14[5],ymm0[5]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm6[0,2,2,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm6[2,3],ymm0[4,5],ymm6[6,7]
@ -1265,28 +1265,28 @@ define void @store_i32_stride4_vf32(<32 x i32>* %in.vecptr0, <32 x i32>* %in.vec
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[2,1,3,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm9[0,1],ymm6[2,3],ymm9[4,5],ymm6[6,7]
; AVX2-NEXT: vmovaps %ymm6, 96(%r8)
; AVX2-NEXT: vmovaps %ymm0, 480(%r8)
; AVX2-NEXT: vmovaps %ymm8, 448(%r8)
; AVX2-NEXT: vmovaps %ymm1, 224(%r8)
; AVX2-NEXT: vmovaps %ymm10, 192(%r8)
; AVX2-NEXT: vmovaps %ymm4, 352(%r8)
; AVX2-NEXT: vmovaps %ymm7, 320(%r8)
; AVX2-NEXT: vmovaps %ymm0, 448(%r8)
; AVX2-NEXT: vmovaps %ymm8, 480(%r8)
; AVX2-NEXT: vmovaps %ymm1, 192(%r8)
; AVX2-NEXT: vmovaps %ymm10, 224(%r8)
; AVX2-NEXT: vmovaps %ymm4, 320(%r8)
; AVX2-NEXT: vmovaps %ymm7, 352(%r8)
; AVX2-NEXT: vmovaps %ymm12, 64(%r8)
; AVX2-NEXT: vmovaps %ymm15, (%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 32(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 416(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 384(%r8)
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 416(%r8)
; AVX2-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 128(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-NEXT: vmovaps %ymm0, 160(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 256(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 288(%r8)
; AVX2-NEXT: addq $168, %rsp
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq

File diff suppressed because it is too large


@ -106,46 +106,46 @@ define void @store_i64_stride3_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
; AVX1-LABEL: store_i64_stride3_vf4:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovapd (%rdi), %ymm0
; AVX1-NEXT: vmovapd (%rsi), %ymm1
; AVX1-NEXT: vmovapd (%rdx), %ymm2
; AVX1-NEXT: vmovapd 16(%rdi), %xmm3
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = ymm1[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm2[2,3],ymm4[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm4[0],ymm3[1],ymm4[2],ymm3[3]
; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm4
; AVX1-NEXT: vpermilps {{.*#+}} xmm5 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, (%rdi), %ymm5, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1],ymm5[2,3],ymm4[4,5],ymm5[6,7]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm1 = ymm1[1,0,2,2]
; AVX1-NEXT: vmovapd (%rdx), %ymm1
; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm2
; AVX1-NEXT: vmovaps (%rdi), %xmm3
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm4 = xmm3[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1,2,3],ymm2[4,5],ymm3[6,7]
; AVX1-NEXT: vmovapd 16(%rdx), %xmm3
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm1[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0],ymm4[1,2,3]
; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm4
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2],ymm3[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm4 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm4[0],ymm1[1],ymm4[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm1[0,1],ymm0[2],ymm1[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2,3]
; AVX1-NEXT: vmovaps %ymm4, (%rcx)
; AVX1-NEXT: vmovapd %ymm3, 64(%rcx)
; AVX1-NEXT: vmovapd %ymm0, 32(%rcx)
; AVX1-NEXT: vmovapd %ymm3, 64(%rcx)
; AVX1-NEXT: vmovaps %ymm2, (%rcx)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_i64_stride3_vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovaps (%rsi), %ymm1
; AVX2-NEXT: vmovaps (%rdx), %ymm2
; AVX2-NEXT: vpermilps {{.*#+}} ymm3 = ymm1[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1,2,3],ymm0[4,5],ymm3[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm2[2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1],ymm1[2,3,4,5],ymm2[6,7]
; AVX2-NEXT: vmovddup {{.*#+}} xmm2 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm0 = ymm0[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm2[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vbroadcastsd (%rdx), %ymm2
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
; AVX2-NEXT: vmovaps %ymm0, (%rcx)
; AVX2-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-NEXT: vmovaps %ymm3, 32(%rcx)
; AVX2-NEXT: vmovaps (%rdx), %ymm1
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm0[2,3],ymm1[2,3]
; AVX2-NEXT: vmovaps 16(%rdx), %xmm3
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm3[0,1],ymm2[2,3],ymm3[4,5],ymm2[6,7]
; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm3
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm3[4,5],ymm2[6,7]
; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm3
; AVX2-NEXT: vmovddup {{.*#+}} xmm4 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm0[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5],ymm4[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm4 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5],ymm1[6,7]
; AVX2-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX2-NEXT: vmovaps %ymm3, (%rcx)
; AVX2-NEXT: vmovaps %ymm2, 64(%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@ -154,8 +154,8 @@ define void @store_i64_stride3_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vmovdqa (%rdx), %ymm1
; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [10,3,7,11]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm2
; AVX512-NEXT: vmovdqa {{.*#+}} ymm2 = [2,11,15,3]
; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,4,8,1,5,9,2,6]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqu64 %zmm3, (%rcx)
@ -225,40 +225,40 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
; AVX1: # %bb.0:
; AVX1-NEXT: vmovapd 32(%rdi), %ymm0
; AVX1-NEXT: vmovapd (%rdi), %ymm1
; AVX1-NEXT: vmovapd (%rsi), %ymm2
; AVX1-NEXT: vmovapd 32(%rsi), %ymm3
; AVX1-NEXT: vmovapd (%rdx), %ymm4
; AVX1-NEXT: vmovapd 32(%rdx), %ymm5
; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm1, %ymm6
; AVX1-NEXT: vpermilps {{.*#+}} xmm7 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, (%rdi), %ymm7, %ymm7
; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1],ymm7[2,3],ymm6[4,5],ymm7[6,7]
; AVX1-NEXT: vmovapd 48(%rdi), %xmm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm5[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = ymm3[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm5[2,3],ymm8[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm8[0],ymm7[1],ymm8[2],ymm7[3]
; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm8
; AVX1-NEXT: vpermilps {{.*#+}} xmm9 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, 32(%rdi), %ymm9, %ymm9
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7]
; AVX1-NEXT: vmovapd 16(%rdi), %xmm9
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm9[0,1],ymm4[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = ymm2[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],ymm10[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm9 = ymm10[0],ymm9[1],ymm10[2],ymm9[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm3 = ymm3[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm3[0,1],ymm0[2],ymm3[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm5[1],ymm0[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = ymm2[1,0,2,2]
; AVX1-NEXT: vmovapd 32(%rdx), %ymm2
; AVX1-NEXT: vmovapd (%rdx), %ymm3
; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm4
; AVX1-NEXT: vmovaps (%rdi), %xmm5
; AVX1-NEXT: vmovaps 32(%rdi), %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm7 = xmm6[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6
; AVX1-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1,2,3],ymm4[4,5],ymm6[6,7]
; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm7 = xmm5[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm7, %ymm5
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm6[4,5],ymm5[6,7]
; AVX1-NEXT: vmovapd 16(%rdx), %xmm6
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm1[2,3],ymm3[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm7[1,2,3]
; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm7[2],ymm6[3]
; AVX1-NEXT: vmovapd 48(%rdx), %xmm7
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm0[2,3],ymm2[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0],ymm8[1,2,3]
; AVX1-NEXT: vbroadcastsd 56(%rsi), %ymm8
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2],ymm7[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm8[0],ymm2[1],ymm8[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm2[0,1],ymm0[2],ymm2[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm2 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm3[1],ymm2[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm2[0,1],ymm1[2],ymm2[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0],ymm4[1],ymm1[2,3]
; AVX1-NEXT: vmovapd %ymm1, 32(%rcx)
; AVX1-NEXT: vmovapd %ymm9, 64(%rcx)
; AVX1-NEXT: vmovaps %ymm8, 96(%rcx)
; AVX1-NEXT: vmovapd %ymm7, 160(%rcx)
; AVX1-NEXT: vmovapd %ymm0, 128(%rcx)
; AVX1-NEXT: vmovaps %ymm6, (%rcx)
; AVX1-NEXT: vmovapd %ymm7, 160(%rcx)
; AVX1-NEXT: vmovapd %ymm6, 64(%rcx)
; AVX1-NEXT: vmovaps %ymm5, (%rcx)
; AVX1-NEXT: vmovaps %ymm4, 96(%rcx)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@ -266,40 +266,40 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-NEXT: vmovaps (%rsi), %ymm2
; AVX2-NEXT: vmovaps 32(%rsi), %ymm3
; AVX2-NEXT: vmovaps (%rdx), %ymm4
; AVX2-NEXT: vmovaps 32(%rdx), %ymm5
; AVX2-NEXT: vmovaps 32(%rdx), %ymm2
; AVX2-NEXT: vmovaps (%rdx), %ymm3
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm4 = ymm0[2,3],ymm3[2,3]
; AVX2-NEXT: vmovaps 16(%rdx), %xmm5
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7]
; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm5
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm4[0,1,2,3],ymm5[4,5],ymm4[6,7]
; AVX2-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm5
; AVX2-NEXT: vmovddup {{.*#+}} xmm6 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm0[0,1,2,1]
; AVX2-NEXT: vpermpd {{.*#+}} ymm7 = ymm1[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5,6,7]
; AVX2-NEXT: vbroadcastsd (%rdx), %ymm7
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm6[0,1,2,3],ymm5[4,5],ymm6[6,7]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm6 = ymm1[2,3],ymm2[2,3]
; AVX2-NEXT: vmovaps 48(%rdx), %xmm7
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm7[0,1],ymm6[2,3],ymm7[4,5],ymm6[6,7]
; AVX2-NEXT: vbroadcastsd 56(%rsi), %ymm7
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm7[4,5],ymm6[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm7 = ymm3[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1,2,3],ymm1[4,5],ymm7[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm5[2,3],ymm7[4,5,6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm1[1],ymm3[1],ymm1[3],ymm3[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm3 = ymm3[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm5[0,1],ymm3[2,3,4,5],ymm5[6,7]
; AVX2-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm1 = ymm1[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm5[2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vbroadcastsd 32(%rdx), %ymm5
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm5[4,5],ymm1[6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm4[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm8[0,1],ymm5[2,3,4,5],ymm8[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm7
; AVX2-NEXT: vmovddup {{.*#+}} xmm8 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm9[0,1],ymm8[2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3],ymm7[4,5],ymm8[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm8[0,1],ymm2[2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5],ymm2[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm4[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX2-NEXT: vmovaps %ymm5, 64(%rcx)
; AVX2-NEXT: vmovaps %ymm1, 96(%rcx)
; AVX2-NEXT: vmovaps %ymm3, 160(%rcx)
; AVX2-NEXT: vmovaps %ymm7, 128(%rcx)
; AVX2-NEXT: vmovaps %ymm6, (%rcx)
; AVX2-NEXT: vmovaps %ymm1, 128(%rcx)
; AVX2-NEXT: vmovaps %ymm7, (%rcx)
; AVX2-NEXT: vmovaps %ymm6, 160(%rcx)
; AVX2-NEXT: vmovaps %ymm5, 96(%rcx)
; AVX2-NEXT: vmovaps %ymm4, 64(%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@ -312,15 +312,15 @@ define void @store_i64_stride3_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,8,3,4,9,6,7]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <u,3,11,u,4,12,u,5>
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [10,1,2,11,4,5,12,7]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,u,14,6,u,15,7,u>
; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,13,2,3,14,5,6,15]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rcx)
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <2,11,u,3,12,u,4,13>
; AVX512-NEXT: vpermi2q %zmm0, %zmm2, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,11,3,4,12,6,7]
; AVX512-NEXT: vpermi2q %zmm1, %zmm3, %zmm5
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <5,13,u,6,14,u,7,15>
; AVX512-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,1,14,3,4,15,6,7]
; AVX512-NEXT: vpermi2q %zmm0, %zmm3, %zmm1
; AVX512-NEXT: vmovdqu64 %zmm1, 128(%rcx)
; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rcx)
; AVX512-NEXT: vmovdqu64 %zmm4, (%rcx)
; AVX512-NEXT: vzeroupper
@ -446,168 +446,159 @@ define void @store_i64_stride3_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
;
; AVX1-LABEL: store_i64_stride3_vf16:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $40, %rsp
; AVX1-NEXT: vmovapd 32(%rdi), %ymm2
; AVX1-NEXT: vmovapd 64(%rdi), %ymm4
; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovapd 32(%rsi), %ymm6
; AVX1-NEXT: vmovapd 64(%rsi), %ymm9
; AVX1-NEXT: vmovapd 96(%rsi), %ymm5
; AVX1-NEXT: vmovapd (%rdi), %ymm13
; AVX1-NEXT: vmovapd 96(%rdi), %ymm14
; AVX1-NEXT: vmovapd 32(%rdi), %ymm4
; AVX1-NEXT: vmovapd 64(%rdi), %ymm7
; AVX1-NEXT: vmovapd (%rdx), %ymm3
; AVX1-NEXT: vmovapd 96(%rdx), %ymm5
; AVX1-NEXT: vmovapd 32(%rdx), %ymm8
; AVX1-NEXT: vmovapd 64(%rdx), %ymm11
; AVX1-NEXT: vmovapd 96(%rdx), %ymm7
; AVX1-NEXT: vmovapd 64(%rdx), %ymm10
; AVX1-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm1
; AVX1-NEXT: vpermilps {{.*#+}} xmm3 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, (%rdi), %ymm3, %ymm3
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm1[0,1],ymm3[2,3],ymm1[4,5],ymm3[6,7]
; AVX1-NEXT: vmovups %ymm0, (%rsp) # 32-byte Spill
; AVX1-NEXT: vmovapd 80(%rdi), %xmm3
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm11[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = ymm9[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm11[2,3],ymm10[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm10[0],ymm3[1],ymm10[2],ymm3[3]
; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vinsertf128 $1, 64(%rdx), %ymm4, %ymm10
; AVX1-NEXT: vpermilps {{.*#+}} xmm12 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, 64(%rdi), %ymm12, %ymm12
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm12[2,3],ymm10[4,5],ymm12[6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovapd 48(%rdi), %xmm12
; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm12[0,1],ymm8[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm13 = ymm6[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm8[2,3],ymm13[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm13[0],ymm12[1],ymm13[2],ymm12[3]
; AVX1-NEXT: vmovupd %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm2, %ymm13
; AVX1-NEXT: vmovapd %ymm2, %ymm12
; AVX1-NEXT: vpermilps {{.*#+}} xmm14 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, 32(%rdi), %ymm14, %ymm14
; AVX1-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm14[2,3],ymm13[4,5],ymm14[6,7]
; AVX1-NEXT: vmovapd 112(%rdi), %xmm14
; AVX1-NEXT: vblendpd {{.*#+}} ymm14 = ymm14[0,1],ymm7[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm15 = ymm5[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm7[2,3],ymm15[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm14 = ymm15[0],ymm14[1],ymm15[2],ymm14[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm15 = mem[0,1,0,1]
; AVX1-NEXT: vinsertf128 $1, 96(%rdi), %ymm15, %ymm15
; AVX1-NEXT: vmovapd 96(%rdi), %ymm2
; AVX1-NEXT: vinsertf128 $1, 96(%rdx), %ymm2, %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7]
; AVX1-NEXT: vmovapd (%rdx), %ymm15
; AVX1-NEXT: vmovapd 16(%rdi), %xmm3
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm15[2,3]
; AVX1-NEXT: vmovapd (%rsi), %ymm0
; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = ymm0[0,0,3,2]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm15[2,3],ymm10[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm10[0],ymm3[1],ymm10[2],ymm3[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm9 = ymm9[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm9[0,1],ymm4[2],ymm9[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm4[0],ymm11[1],ymm4[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm6 = ymm6[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0,1],ymm12[2],ymm6[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm6 = ymm6[0],ymm8[1],ymm6[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm5 = ymm5[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm5[0,1],ymm2[2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm7[1],ymm2[2,3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm0 = ymm0[1,0,2,2]
; AVX1-NEXT: vblendpd $4, {{[-0-9]+}}(%r{{[sb]}}p), %ymm0, %ymm0 # 32-byte Folded Reload
; AVX1-NEXT: # ymm0 = ymm0[0,1],mem[2],ymm0[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm15[1],ymm0[2,3]
; AVX1-NEXT: vmovapd %ymm0, 32(%rcx)
; AVX1-NEXT: vmovapd %ymm3, 64(%rcx)
; AVX1-NEXT: vmovaps %ymm1, 288(%rcx)
; AVX1-NEXT: vmovapd %ymm14, 352(%rcx)
; AVX1-NEXT: vmovapd %ymm2, 320(%rcx)
; AVX1-NEXT: vmovaps %ymm13, 96(%rcx)
; AVX1-NEXT: vmovaps (%rdi), %xmm6
; AVX1-NEXT: vmovaps 32(%rdi), %xmm0
; AVX1-NEXT: vmovaps 64(%rdi), %xmm2
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm6[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm9, %ymm6
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm6[0,1,2,3],ymm1[4,5],ymm6[6,7]
; AVX1-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vinsertf128 $1, 64(%rdx), %ymm0, %ymm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm2[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm9, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm6 = ymm2[0,1,2,3],ymm6[4,5],ymm2[6,7]
; AVX1-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm2
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm9 = xmm0[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm9, %ymm0
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm0[0,1,2,3],ymm2[4,5],ymm0[6,7]
; AVX1-NEXT: vinsertf128 $1, 96(%rdx), %ymm0, %ymm0
; AVX1-NEXT: vmovaps 96(%rdi), %xmm2
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm11 = xmm2[0],mem[0]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm11, %ymm2
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
; AVX1-NEXT: vmovapd 80(%rdx), %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm7[2,3],ymm10[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX1-NEXT: vbroadcastsd 88(%rsi), %ymm2
; AVX1-NEXT: vblendpd {{.*#+}} ymm12 = ymm0[0,1],ymm2[2],ymm0[3]
; AVX1-NEXT: vmovapd 48(%rdx), %xmm0
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm4[2,3],ymm8[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm2[1,2,3]
; AVX1-NEXT: vbroadcastsd 56(%rsi), %ymm2
; AVX1-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0,1],ymm2[2],ymm0[3]
; AVX1-NEXT: vmovapd 112(%rdx), %xmm2
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm14[2,3],ymm5[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm15[1,2,3]
; AVX1-NEXT: vbroadcastsd 120(%rsi), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0,1],ymm15[2],ymm2[3]
; AVX1-NEXT: vmovapd 16(%rdx), %xmm15
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm13[2,3],ymm3[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm15[0],ymm1[1,2,3]
; AVX1-NEXT: vbroadcastsd 24(%rsi), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm1[0,1],ymm15[2],ymm1[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm15 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm10 = ymm15[0],ymm10[1],ymm15[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm10[0,1],ymm7[2],ymm10[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm10 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm8 = ymm10[0],ymm8[1],ymm10[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm4 = ymm8[0,1],ymm4[2],ymm8[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm8[0],ymm5[1],ymm8[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm14[2],ymm5[3]
; AVX1-NEXT: vpermilpd {{.*#+}} ymm8 = mem[1,0,2,2]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm8[0],ymm3[1],ymm8[2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm13[2],ymm3[3]
; AVX1-NEXT: vmovapd %ymm3, 32(%rcx)
; AVX1-NEXT: vmovapd %ymm5, 320(%rcx)
; AVX1-NEXT: vmovapd %ymm4, 128(%rcx)
; AVX1-NEXT: vmovapd %ymm7, 224(%rcx)
; AVX1-NEXT: vmovapd %ymm1, 64(%rcx)
; AVX1-NEXT: vmovapd %ymm2, 352(%rcx)
; AVX1-NEXT: vmovapd %ymm0, 160(%rcx)
; AVX1-NEXT: vmovapd %ymm12, 256(%rcx)
; AVX1-NEXT: vmovaps %ymm11, 288(%rcx)
; AVX1-NEXT: vmovaps %ymm9, 96(%rcx)
; AVX1-NEXT: vmovaps %ymm6, 192(%rcx)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 160(%rcx)
; AVX1-NEXT: vmovapd %ymm6, 128(%rcx)
; AVX1-NEXT: vmovapd %ymm4, 224(%rcx)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 192(%rcx)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 256(%rcx)
; AVX1-NEXT: vmovups (%rsp), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, (%rcx)
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_i64_stride3_vf16:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovaps 32(%rdi), %ymm9
; AVX2-NEXT: vmovaps 32(%rdi), %ymm4
; AVX2-NEXT: vmovaps 64(%rdi), %ymm7
; AVX2-NEXT: vmovaps 96(%rdi), %ymm5
; AVX2-NEXT: vmovaps (%rsi), %ymm2
; AVX2-NEXT: vmovaps 32(%rsi), %ymm12
; AVX2-NEXT: vmovaps 64(%rsi), %ymm11
; AVX2-NEXT: vmovaps 96(%rsi), %ymm8
; AVX2-NEXT: vmovaps (%rdx), %ymm3
; AVX2-NEXT: vmovaps 32(%rdx), %ymm13
; AVX2-NEXT: vmovaps 64(%rdx), %ymm14
; AVX2-NEXT: vmovaps 96(%rdx), %ymm10
; AVX2-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm0[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm4[0,1],ymm1[2,3],ymm4[4,5,6,7]
; AVX2-NEXT: vbroadcastsd (%rdx), %ymm4
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm4[4,5],ymm1[6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm11[1],ymm7[3],ymm11[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm6 = ymm14[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm6[0,1],ymm4[2,3,4,5],ymm6[6,7]
; AVX2-NEXT: vmovddup {{.*#+}} xmm6 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm7[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm15[0,1],ymm6[2,3],ymm15[4,5,6,7]
; AVX2-NEXT: vbroadcastsd 64(%rdx), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm6[0,1,2,3],ymm15[4,5],ymm6[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm11 = ymm11[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm11[0,1,2,3],ymm7[4,5],ymm11[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm7[0,1],ymm14[2,3],ymm7[4,5,6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm11 = ymm12[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1,2,3],ymm9[4,5],ymm11[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm11[0,1],ymm13[2,3],ymm11[4,5,6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm12 = ymm9[1],ymm12[1],ymm9[3],ymm12[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm12 = ymm12[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm13[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3,4,5],ymm13[6,7]
; AVX2-NEXT: vmovddup {{.*#+}} xmm13 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm13[2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vbroadcastsd 32(%rdx), %ymm13
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm13[4,5],ymm9[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm13 = ymm8[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1,2,3],ymm5[4,5],ymm13[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm13[0,1],ymm10[2,3],ymm13[4,5,6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm5[1],ymm8[1],ymm5[3],ymm8[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm8 = ymm8[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3,4,5],ymm10[6,7]
; AVX2-NEXT: vmovddup {{.*#+}} xmm10 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1],ymm10[2,3],ymm5[4,5,6,7]
; AVX2-NEXT: vbroadcastsd 96(%rdx), %ymm10
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],ymm10[4,5],ymm5[6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm0[1],ymm2[1],ymm0[3],ymm2[3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,3,3]
; AVX2-NEXT: vpermpd {{.*#+}} ymm14 = ymm3[2,1,2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm14[0,1],ymm10[2,3,4,5],ymm14[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm2 = ymm2[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vmovaps 96(%rdi), %ymm3
; AVX2-NEXT: vmovaps (%rdx), %ymm2
; AVX2-NEXT: vmovaps 96(%rdx), %ymm6
; AVX2-NEXT: vmovaps 32(%rdx), %ymm8
; AVX2-NEXT: vmovaps 64(%rdx), %ymm10
; AVX2-NEXT: vinsertf128 $1, (%rdx), %ymm0, %ymm1
; AVX2-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm9 = ymm0[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0,1],ymm5[2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm5[0,1,2,3],ymm1[4,5],ymm5[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vinsertf128 $1, 64(%rdx), %ymm0, %ymm5
; AVX2-NEXT: vmovddup {{.*#+}} xmm9 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm11 = ymm7[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm9[0,1,2,3],ymm5[4,5],ymm9[6,7]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm7[2,3],ymm10[2,3]
; AVX2-NEXT: vmovaps 80(%rdx), %xmm11
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm11[0,1],ymm9[2,3],ymm11[4,5],ymm9[6,7]
; AVX2-NEXT: vbroadcastsd 88(%rsi), %ymm11
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm11[4,5],ymm9[6,7]
; AVX2-NEXT: vinsertf128 $1, 32(%rdx), %ymm0, %ymm11
; AVX2-NEXT: vmovddup {{.*#+}} xmm12 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm13 = ymm4[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm11 = ymm12[0,1,2,3],ymm11[4,5],ymm12[6,7]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm12 = ymm4[2,3],ymm8[2,3]
; AVX2-NEXT: vmovaps 48(%rdx), %xmm13
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm13[0,1],ymm12[2,3],ymm13[4,5],ymm12[6,7]
; AVX2-NEXT: vbroadcastsd 56(%rsi), %ymm13
; AVX2-NEXT: vblendps {{.*#+}} ymm12 = ymm12[0,1,2,3],ymm13[4,5],ymm12[6,7]
; AVX2-NEXT: vinsertf128 $1, 96(%rdx), %ymm0, %ymm13
; AVX2-NEXT: vmovddup {{.*#+}} xmm14 = mem[0,0]
; AVX2-NEXT: vpermpd {{.*#+}} ymm15 = ymm3[0,1,2,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm13 = ymm14[0,1,2,3],ymm13[4,5],ymm14[6,7]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm3[2,3],ymm6[2,3]
; AVX2-NEXT: vmovaps 112(%rdx), %xmm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm15[0,1],ymm14[2,3],ymm15[4,5],ymm14[6,7]
; AVX2-NEXT: vbroadcastsd 120(%rsi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1,2,3],ymm15[4,5],ymm14[6,7]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm0[2,3],ymm2[2,3]
; AVX2-NEXT: vmovaps 16(%rdx), %xmm1
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1],ymm15[2,3],ymm1[4,5],ymm15[6,7]
; AVX2-NEXT: vbroadcastsd 24(%rsi), %ymm15
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm15[4,5],ymm1[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm15 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm15[0,1],ymm10[2,3],ymm15[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm10[0,1,2,3],ymm7[4,5],ymm10[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm10 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm8 = ymm10[0,1],ymm8[2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm4 = ymm8[0,1,2,3],ymm4[4,5],ymm8[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm8 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm6 = ymm8[0,1],ymm6[2,3],ymm8[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm6[0,1,2,3],ymm3[4,5],ymm6[6,7]
; AVX2-NEXT: vpermilps {{.*#+}} ymm6 = mem[2,3,0,1,6,7,4,5]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm6[0,1],ymm2[2,3],ymm6[4,5,6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5],ymm2[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1],ymm3[2,3],ymm0[4,5,6,7]
; AVX2-NEXT: vmovaps %ymm0, 32(%rcx)
; AVX2-NEXT: vmovaps %ymm10, 64(%rcx)
; AVX2-NEXT: vmovaps %ymm5, 288(%rcx)
; AVX2-NEXT: vmovaps %ymm8, 352(%rcx)
; AVX2-NEXT: vmovaps %ymm13, 320(%rcx)
; AVX2-NEXT: vmovaps %ymm9, 96(%rcx)
; AVX2-NEXT: vmovaps %ymm12, 160(%rcx)
; AVX2-NEXT: vmovaps %ymm11, 128(%rcx)
; AVX2-NEXT: vmovaps %ymm3, 320(%rcx)
; AVX2-NEXT: vmovaps %ymm4, 128(%rcx)
; AVX2-NEXT: vmovaps %ymm7, 224(%rcx)
; AVX2-NEXT: vmovaps %ymm6, 192(%rcx)
; AVX2-NEXT: vmovaps %ymm4, 256(%rcx)
; AVX2-NEXT: vmovaps %ymm1, (%rcx)
; AVX2-NEXT: vmovaps %ymm1, 64(%rcx)
; AVX2-NEXT: vmovaps %ymm14, 352(%rcx)
; AVX2-NEXT: vmovaps %ymm13, 288(%rcx)
; AVX2-NEXT: vmovaps %ymm12, 160(%rcx)
; AVX2-NEXT: vmovaps %ymm11, 96(%rcx)
; AVX2-NEXT: vmovaps %ymm9, 256(%rcx)
; AVX2-NEXT: vmovaps %ymm5, 192(%rcx)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, (%rcx)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@ -624,23 +615,23 @@ define void @store_i64_stride3_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
; AVX512-NEXT: vpermt2q %zmm2, %zmm6, %zmm7
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,8,3,4,9,6,7]
; AVX512-NEXT: vpermt2q %zmm4, %zmm8, %zmm7
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <5,u,14,6,u,15,7,u>
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = <5,13,u,6,14,u,7,15>
; AVX512-NEXT: vmovdqa64 %zmm3, %zmm10
; AVX512-NEXT: vpermt2q %zmm1, %zmm9, %zmm10
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,13,2,3,14,5,6,15]
; AVX512-NEXT: vpermt2q %zmm5, %zmm11, %zmm10
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <u,3,11,u,4,12,u,5>
; AVX512-NEXT: vmovdqa64 %zmm1, %zmm13
; AVX512-NEXT: vpermt2q %zmm3, %zmm12, %zmm13
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [10,1,2,11,4,5,12,7]
; AVX512-NEXT: vpermt2q %zmm5, %zmm14, %zmm13
; AVX512-NEXT: vpermt2q %zmm5, %zmm9, %zmm10
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,1,14,3,4,15,6,7]
; AVX512-NEXT: vpermt2q %zmm1, %zmm11, %zmm10
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm12 = <2,11,u,3,12,u,4,13>
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm13
; AVX512-NEXT: vpermt2q %zmm1, %zmm12, %zmm13
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm14 = [0,1,11,3,4,12,6,7]
; AVX512-NEXT: vpermt2q %zmm3, %zmm14, %zmm13
; AVX512-NEXT: vpermt2q %zmm3, %zmm6, %zmm1
; AVX512-NEXT: vpermt2q %zmm5, %zmm8, %zmm1
; AVX512-NEXT: vpermi2q %zmm0, %zmm2, %zmm9
; AVX512-NEXT: vpermt2q %zmm4, %zmm11, %zmm9
; AVX512-NEXT: vpermt2q %zmm2, %zmm12, %zmm0
; AVX512-NEXT: vpermt2q %zmm4, %zmm14, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, 64(%rcx)
; AVX512-NEXT: vpermi2q %zmm4, %zmm2, %zmm9
; AVX512-NEXT: vpermt2q %zmm0, %zmm11, %zmm9
; AVX512-NEXT: vpermt2q %zmm0, %zmm12, %zmm4
; AVX512-NEXT: vpermt2q %zmm2, %zmm14, %zmm4
; AVX512-NEXT: vmovdqu64 %zmm4, 64(%rcx)
; AVX512-NEXT: vmovdqu64 %zmm9, 128(%rcx)
; AVX512-NEXT: vmovdqu64 %zmm1, 192(%rcx)
; AVX512-NEXT: vmovdqu64 %zmm13, 256(%rcx)

View File

@ -258,107 +258,107 @@ define void @store_i64_stride4_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
;
; AVX1-LABEL: store_i64_stride4_vf8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps (%rdx), %ymm2
; AVX1-NEXT: vmovaps 32(%rdx), %ymm1
; AVX1-NEXT: vmovaps (%rcx), %ymm3
; AVX1-NEXT: vmovaps 32(%rdx), %ymm2
; AVX1-NEXT: vmovaps (%rdx), %ymm3
; AVX1-NEXT: vmovaps 32(%rcx), %ymm4
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
; AVX1-NEXT: vmovaps 48(%rsi), %xmm5
; AVX1-NEXT: vmovaps 48(%rdi), %xmm6
; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm7[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm6[1],xmm5[1]
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm4[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX1-NEXT: vmovaps 16(%rsi), %xmm5
; AVX1-NEXT: vmovaps 16(%rdi), %xmm6
; AVX1-NEXT: vmovlhps {{.*#+}} xmm7 = xmm6[0],xmm5[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm7[0,1,2,3],ymm4[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm5[1]
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: vmovaps 32(%rsi), %xmm3
; AVX1-NEXT: vmovaps 32(%rdi), %xmm5
; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm5[0],xmm3[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm5[1],xmm3[1]
; AVX1-NEXT: vmovaps (%rsi), %xmm5
; AVX1-NEXT: vmovaps (%rdi), %xmm7
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm7[0],xmm5[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1]
; AVX1-NEXT: vmovaps (%rcx), %ymm5
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
; AVX1-NEXT: vmovaps 16(%rsi), %xmm6
; AVX1-NEXT: vmovaps 16(%rdi), %xmm7
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm7[0],xmm6[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm1[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm9 = ymm2[1],ymm4[1],ymm2[3],ymm4[3]
; AVX1-NEXT: vmovaps 48(%rsi), %xmm0
; AVX1-NEXT: vmovaps 48(%rdi), %xmm1
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm0[1]
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1,2,3],ymm9[4,5,6,7]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm0[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm6[1]
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm2[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: vmovaps 32(%rsi), %xmm2
; AVX1-NEXT: vmovaps 32(%rdi), %xmm3
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm3[1],xmm2[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX1-NEXT: vmovaps (%rsi), %xmm3
; AVX1-NEXT: vmovaps (%rdi), %xmm5
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm5[1],xmm3[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm5[0],xmm3[0]
; AVX1-NEXT: vmovaps (%rcx), %xmm5
; AVX1-NEXT: vmovaps 32(%rcx), %xmm7
; AVX1-NEXT: vmovaps (%rdx), %xmm0
; AVX1-NEXT: vmovaps 32(%rdx), %xmm1
; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm1[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm1[1],xmm7[1]
; AVX1-NEXT: vmovaps (%rcx), %xmm7
; AVX1-NEXT: vmovaps (%rdx), %xmm2
; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm2[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm2[1],xmm7[1]
; AVX1-NEXT: vmovaps %xmm2, 48(%r8)
; AVX1-NEXT: vmovaps %xmm6, 16(%r8)
; AVX1-NEXT: vmovaps %xmm1, 176(%r8)
; AVX1-NEXT: vmovaps %xmm4, 144(%r8)
; AVX1-NEXT: vmovaps %xmm5, 32(%r8)
; AVX1-NEXT: vmovaps %xmm0, (%r8)
; AVX1-NEXT: vmovaps %xmm3, 160(%r8)
; AVX1-NEXT: vmovaps %xmm12, 128(%r8)
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm1[1],xmm7[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm5[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm5[0]
; AVX1-NEXT: vmovaps %xmm0, 16(%r8)
; AVX1-NEXT: vmovaps %xmm7, 48(%r8)
; AVX1-NEXT: vmovaps %xmm1, 144(%r8)
; AVX1-NEXT: vmovaps %xmm4, 176(%r8)
; AVX1-NEXT: vmovaps %xmm3, (%r8)
; AVX1-NEXT: vmovaps %xmm6, 32(%r8)
; AVX1-NEXT: vmovaps %xmm2, 128(%r8)
; AVX1-NEXT: vmovaps %xmm12, 160(%r8)
; AVX1-NEXT: vmovaps %ymm11, 96(%r8)
; AVX1-NEXT: vmovaps %ymm10, 64(%r8)
; AVX1-NEXT: vmovaps %ymm10, 192(%r8)
; AVX1-NEXT: vmovaps %ymm9, 224(%r8)
; AVX1-NEXT: vmovaps %ymm8, 192(%r8)
; AVX1-NEXT: vmovaps %ymm8, 64(%r8)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_i64_stride4_vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps (%rdi), %ymm2
; AVX2-NEXT: vmovaps 32(%rdi), %ymm1
; AVX2-NEXT: vmovaps (%rsi), %ymm3
; AVX2-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-NEXT: vmovaps (%rdi), %ymm3
; AVX2-NEXT: vmovaps 32(%rsi), %ymm4
; AVX2-NEXT: vmovaps (%rdx), %ymm5
; AVX2-NEXT: vmovaps (%rsi), %ymm5
; AVX2-NEXT: vmovaps 32(%rdx), %ymm6
; AVX2-NEXT: vmovaps (%rcx), %ymm7
; AVX2-NEXT: vmovaps (%rdx), %ymm7
; AVX2-NEXT: vmovaps 32(%rcx), %ymm8
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm1[0],ymm4[0],ymm1[2],ymm4[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm9[2,3],ymm0[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm6 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],ymm4[1],ymm1[3],ymm4[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm1[2,3],ymm6[2,3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm5[0],ymm7[0],ymm5[2],ymm7[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm6[2,3],ymm4[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm5 = ymm5[1],ymm7[1],ymm5[3],ymm7[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm2[2,3],ymm5[2,3]
; AVX2-NEXT: vmovaps (%rsi), %xmm3
; AVX2-NEXT: vmovaps (%rcx), %ymm9
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm7[0],ymm9[0],ymm7[2],ymm9[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm3[0],ymm5[0],ymm3[2],ymm5[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm11 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm6[1],ymm8[1],ymm6[3],ymm8[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm2[1],ymm4[1],ymm2[3],ymm4[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm10[2,3],ymm1[2,3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm6[0],ymm8[0],ymm6[2],ymm8[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[2],ymm4[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm2[2,3],ymm6[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm9[1],ymm7[3],ymm9[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm3[1],ymm5[1],ymm3[3],ymm5[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm3[2,3],ymm4[2,3]
; AVX2-NEXT: vmovaps (%rsi), %xmm4
; AVX2-NEXT: vmovaps 32(%rsi), %xmm5
; AVX2-NEXT: vmovaps (%rdi), %xmm6
; AVX2-NEXT: vmovaps 32(%rdi), %xmm7
; AVX2-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm5[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm5[1]
; AVX2-NEXT: vmovaps (%rcx), %xmm1
; AVX2-NEXT: vmovaps 32(%rcx), %xmm4
; AVX2-NEXT: vmovaps (%rdx), %xmm2
; AVX2-NEXT: vmovaps 32(%rcx), %xmm2
; AVX2-NEXT: vmovaps (%rdx), %xmm3
; AVX2-NEXT: vmovaps 32(%rdx), %xmm0
; AVX2-NEXT: vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm4[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm7[1],xmm5[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm4[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm3[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm7 = xmm2[0],xmm1[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm6[1],xmm3[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; AVX2-NEXT: vmovaps %xmm1, 48(%r8)
; AVX2-NEXT: vmovaps %xmm3, 32(%r8)
; AVX2-NEXT: vmovaps %xmm7, 16(%r8)
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm0[1],xmm2[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm6[1],xmm4[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm4[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX2-NEXT: vmovaps %xmm1, 16(%r8)
; AVX2-NEXT: vmovaps %xmm4, (%r8)
; AVX2-NEXT: vmovaps %xmm0, 176(%r8)
; AVX2-NEXT: vmovaps %xmm5, 160(%r8)
; AVX2-NEXT: vmovaps %xmm13, 144(%r8)
; AVX2-NEXT: vmovaps %xmm12, 128(%r8)
; AVX2-NEXT: vmovaps %ymm11, 96(%r8)
; AVX2-NEXT: vmovaps %ymm10, 64(%r8)
; AVX2-NEXT: vmovaps %ymm8, 224(%r8)
; AVX2-NEXT: vmovaps %ymm9, 192(%r8)
; AVX2-NEXT: vmovaps %xmm7, 48(%r8)
; AVX2-NEXT: vmovaps %xmm2, 32(%r8)
; AVX2-NEXT: vmovaps %xmm0, 144(%r8)
; AVX2-NEXT: vmovaps %xmm5, 128(%r8)
; AVX2-NEXT: vmovaps %xmm13, 176(%r8)
; AVX2-NEXT: vmovaps %xmm12, 160(%r8)
; AVX2-NEXT: vmovaps %ymm9, 96(%r8)
; AVX2-NEXT: vmovaps %ymm8, 192(%r8)
; AVX2-NEXT: vmovaps %ymm10, 224(%r8)
; AVX2-NEXT: vmovaps %ymm11, 64(%r8)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@ -617,54 +617,54 @@ define void @store_i64_stride4_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovaps 64(%rsi), %xmm2
; AVX1-NEXT: vmovaps 64(%rdi), %xmm3
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm3[0],xmm2[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm3[1],xmm2[1]
; AVX1-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovaps 32(%rcx), %xmm4
; AVX1-NEXT: vmovaps 64(%rcx), %xmm5
; AVX1-NEXT: vmovaps 64(%rdx), %xmm7
; AVX1-NEXT: vmovlhps {{.*#+}} xmm15 = xmm7[0],xmm5[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm3[1],xmm2[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm7[1],xmm5[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm7[1],xmm5[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm14 = xmm3[0],xmm2[0]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm13 = xmm7[0],xmm5[0]
; AVX1-NEXT: vmovaps 32(%rsi), %xmm5
; AVX1-NEXT: vmovaps 32(%rdi), %xmm7
; AVX1-NEXT: vmovlhps {{.*#+}} xmm12 = xmm7[0],xmm5[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm7[1],xmm5[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm5[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm10 = xmm7[0],xmm5[0]
; AVX1-NEXT: vmovaps 32(%rdx), %xmm7
; AVX1-NEXT: vmovlhps {{.*#+}} xmm11 = xmm7[0],xmm4[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm7[1],xmm4[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm7[1],xmm4[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm9 = xmm7[0],xmm4[0]
; AVX1-NEXT: vmovaps 96(%rsi), %xmm7
; AVX1-NEXT: vmovaps 96(%rdi), %xmm0
; AVX1-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm0[1],xmm7[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm0[1],xmm7[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm0[0],xmm7[0]
; AVX1-NEXT: vmovaps 96(%rcx), %xmm7
; AVX1-NEXT: vmovaps 96(%rdx), %xmm0
; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm0[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm0[1],xmm7[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm7[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm7[0]
; AVX1-NEXT: vmovaps (%rsi), %xmm7
; AVX1-NEXT: vmovaps (%rdi), %xmm0
; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm0[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm7[1]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm0[1],xmm7[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm7[0]
; AVX1-NEXT: vmovaps (%rcx), %xmm7
; AVX1-NEXT: vmovaps (%rdx), %xmm0
; AVX1-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm7[0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm7[1]
; AVX1-NEXT: vmovaps %xmm0, 48(%r8)
; AVX1-NEXT: vmovaps %xmm2, 32(%r8)
; AVX1-NEXT: vmovaps %xmm1, 16(%r8)
; AVX1-NEXT: vmovaps %xmm3, (%r8)
; AVX1-NEXT: vmovaps %xmm4, 432(%r8)
; AVX1-NEXT: vmovaps %xmm6, 416(%r8)
; AVX1-NEXT: vmovaps %xmm5, 400(%r8)
; AVX1-NEXT: vmovaps %xmm8, 384(%r8)
; AVX1-NEXT: vmovaps %xmm9, 176(%r8)
; AVX1-NEXT: vmovaps %xmm10, 160(%r8)
; AVX1-NEXT: vmovaps %xmm11, 144(%r8)
; AVX1-NEXT: vmovaps %xmm12, 128(%r8)
; AVX1-NEXT: vmovaps %xmm13, 304(%r8)
; AVX1-NEXT: vmovaps %xmm14, 288(%r8)
; AVX1-NEXT: vmovaps %xmm15, 272(%r8)
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm7[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm7[0]
; AVX1-NEXT: vmovaps %xmm0, 16(%r8)
; AVX1-NEXT: vmovaps %xmm2, (%r8)
; AVX1-NEXT: vmovaps %xmm1, 48(%r8)
; AVX1-NEXT: vmovaps %xmm3, 32(%r8)
; AVX1-NEXT: vmovaps %xmm4, 400(%r8)
; AVX1-NEXT: vmovaps %xmm6, 384(%r8)
; AVX1-NEXT: vmovaps %xmm5, 432(%r8)
; AVX1-NEXT: vmovaps %xmm8, 416(%r8)
; AVX1-NEXT: vmovaps %xmm9, 144(%r8)
; AVX1-NEXT: vmovaps %xmm10, 128(%r8)
; AVX1-NEXT: vmovaps %xmm11, 176(%r8)
; AVX1-NEXT: vmovaps %xmm12, 160(%r8)
; AVX1-NEXT: vmovaps %xmm13, 272(%r8)
; AVX1-NEXT: vmovaps %xmm14, 256(%r8)
; AVX1-NEXT: vmovaps %xmm15, 304(%r8)
; AVX1-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vmovaps %xmm0, 256(%r8)
; AVX1-NEXT: vmovaps %xmm0, 288(%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 448(%r8)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
@@ -740,52 +740,52 @@ define void @store_i64_stride4_vf16(<16 x i64>* %in.vecptr0, <16 x i64>* %in.vec
; AVX2-NEXT: vmovaps 64(%rsi), %xmm3
; AVX2-NEXT: vmovaps 32(%rdi), %xmm4
; AVX2-NEXT: vmovaps 64(%rdi), %xmm5
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm5[0],xmm3[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm5[1],xmm3[1]
; AVX2-NEXT: vmovaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps 32(%rcx), %xmm6
; AVX2-NEXT: vmovaps 64(%rcx), %xmm7
; AVX2-NEXT: vmovaps 64(%rdx), %xmm0
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm5[1],xmm3[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm15 = xmm0[0],xmm7[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm13 = xmm0[1],xmm7[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm12 = xmm4[0],xmm2[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm4[1],xmm2[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm14 = xmm5[0],xmm3[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm0[1],xmm7[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm13 = xmm0[0],xmm7[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm4[1],xmm2[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0]
; AVX2-NEXT: vmovaps 32(%rdx), %xmm4
; AVX2-NEXT: vmovlhps {{.*#+}} xmm11 = xmm4[0],xmm6[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm4[1],xmm6[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm6[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm9 = xmm4[0],xmm6[0]
; AVX2-NEXT: vmovaps 96(%rsi), %xmm6
; AVX2-NEXT: vmovaps 96(%rdi), %xmm0
; AVX2-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm6[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm6[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm8 = xmm0[1],xmm6[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm7 = xmm0[0],xmm6[0]
; AVX2-NEXT: vmovaps 96(%rcx), %xmm6
; AVX2-NEXT: vmovaps 96(%rdx), %xmm0
; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm0[0],xmm6[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm0[1],xmm6[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm0[1],xmm6[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm0[0],xmm6[0]
; AVX2-NEXT: vmovaps (%rsi), %xmm6
; AVX2-NEXT: vmovaps (%rdi), %xmm0
; AVX2-NEXT: vmovlhps {{.*#+}} xmm3 = xmm0[0],xmm6[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm0[1],xmm6[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm0[0],xmm6[0]
; AVX2-NEXT: vmovaps (%rcx), %xmm6
; AVX2-NEXT: vmovaps (%rdx), %xmm0
; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm0[0],xmm6[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm0[1],xmm6[1]
; AVX2-NEXT: vmovaps %xmm0, 48(%r8)
; AVX2-NEXT: vmovaps %xmm2, 32(%r8)
; AVX2-NEXT: vmovaps %xmm1, 16(%r8)
; AVX2-NEXT: vmovaps %xmm3, (%r8)
; AVX2-NEXT: vmovaps %xmm4, 432(%r8)
; AVX2-NEXT: vmovaps %xmm7, 416(%r8)
; AVX2-NEXT: vmovaps %xmm5, 400(%r8)
; AVX2-NEXT: vmovaps %xmm8, 384(%r8)
; AVX2-NEXT: vmovaps %xmm9, 176(%r8)
; AVX2-NEXT: vmovaps %xmm10, 160(%r8)
; AVX2-NEXT: vmovaps %xmm11, 144(%r8)
; AVX2-NEXT: vmovaps %xmm12, 128(%r8)
; AVX2-NEXT: vmovaps %xmm13, 304(%r8)
; AVX2-NEXT: vmovaps %xmm14, 288(%r8)
; AVX2-NEXT: vmovaps %xmm15, 272(%r8)
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm0[1],xmm6[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; AVX2-NEXT: vmovaps %xmm0, 16(%r8)
; AVX2-NEXT: vmovaps %xmm2, (%r8)
; AVX2-NEXT: vmovaps %xmm1, 48(%r8)
; AVX2-NEXT: vmovaps %xmm3, 32(%r8)
; AVX2-NEXT: vmovaps %xmm4, 400(%r8)
; AVX2-NEXT: vmovaps %xmm7, 384(%r8)
; AVX2-NEXT: vmovaps %xmm5, 432(%r8)
; AVX2-NEXT: vmovaps %xmm8, 416(%r8)
; AVX2-NEXT: vmovaps %xmm9, 144(%r8)
; AVX2-NEXT: vmovaps %xmm10, 128(%r8)
; AVX2-NEXT: vmovaps %xmm11, 176(%r8)
; AVX2-NEXT: vmovaps %xmm12, 160(%r8)
; AVX2-NEXT: vmovaps %xmm13, 272(%r8)
; AVX2-NEXT: vmovaps %xmm14, 256(%r8)
; AVX2-NEXT: vmovaps %xmm15, 304(%r8)
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX2-NEXT: vmovaps %xmm0, 256(%r8)
; AVX2-NEXT: vmovaps %xmm0, 288(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 448(%r8)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload

@@ -169,92 +169,84 @@ define void @store_i64_stride6_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
; AVX1-LABEL: store_i64_stride6_vf4:
; AVX1: # %bb.0:
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX1-NEXT: vmovapd (%rdi), %ymm0
; AVX1-NEXT: vmovapd (%rsi), %ymm1
; AVX1-NEXT: vmovaps (%rdx), %ymm8
; AVX1-NEXT: vmovapd (%r8), %ymm3
; AVX1-NEXT: vmovapd (%r9), %ymm4
; AVX1-NEXT: vmovddup {{.*#+}} xmm5 = mem[0,0]
; AVX1-NEXT: vmovaps (%rsi), %xmm6
; AVX1-NEXT: vmovaps (%rdi), %xmm7
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm7[1],xmm6[1]
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm2
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm3[0],ymm2[1,2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm2[0],ymm5[1],ymm2[2,3]
; AVX1-NEXT: vmovaps (%rcx), %xmm5
; AVX1-NEXT: vinsertf128 $1, (%r9), %ymm5, %ymm9
; AVX1-NEXT: vpermilps {{.*#+}} xmm10 = mem[2,3,2,3]
; AVX1-NEXT: vbroadcastsd 8(%r8), %ymm11
; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1,2,3],ymm11[4,5],ymm10[6,7]
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm10[0,1],ymm9[2,3],ymm10[4,5],ymm9[6,7]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm4[2,3],ymm1[2,3]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm3[2,3],ymm0[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm10[0],ymm0[2],ymm10[3]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm8[0],mem[0],ymm8[2],mem[2]
; AVX1-NEXT: vmovaps (%rdi), %ymm0
; AVX1-NEXT: vmovaps (%rdx), %ymm1
; AVX1-NEXT: vmovaps (%r8), %ymm2
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],mem[0],ymm1[2],mem[2]
; AVX1-NEXT: vmovaps 16(%rdi), %xmm3
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: vmovapd 16(%rdx), %xmm3
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm3[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
; AVX1-NEXT: vmovaps 16(%rdx), %xmm3
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm3 = xmm3[1],mem[1]
; AVX1-NEXT: vbroadcastsd 24(%r8), %ymm8
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1],ymm8[2],ymm3[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm3 = ymm3[0,1,2],ymm4[3]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm7[0],xmm6[0]
; AVX1-NEXT: vmovaps (%rdx), %xmm6
; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm6[0],xmm5[0]
; AVX1-NEXT: vmovaps %xmm5, 16(%rax)
; AVX1-NEXT: vmovaps %xmm4, (%rax)
; AVX1-NEXT: vmovaps %ymm1, 96(%rax)
; AVX1-NEXT: vmovapd %ymm0, 128(%rax)
; AVX1-NEXT: vmovaps %ymm9, 64(%rax)
; AVX1-NEXT: vmovapd %ymm2, 32(%rax)
; AVX1-NEXT: vmovapd %ymm3, 160(%rax)
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm3[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; AVX1-NEXT: vmovaps 16(%r8), %xmm3
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm3 = xmm3[0],mem[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm10 = ymm3[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: vmovaps (%rcx), %xmm3
; AVX1-NEXT: vmovaps (%rdx), %xmm4
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm4[1],xmm3[1]
; AVX1-NEXT: vmovaps (%r9), %xmm6
; AVX1-NEXT: vmovaps (%r8), %xmm7
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm7[1],xmm6[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
; AVX1-NEXT: vmovaps (%rsi), %xmm7
; AVX1-NEXT: vmovaps (%rdi), %xmm2
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX1-NEXT: vmovaps %xmm3, 16(%rax)
; AVX1-NEXT: vmovaps %xmm2, (%rax)
; AVX1-NEXT: vmovaps %xmm0, 48(%rax)
; AVX1-NEXT: vmovaps %xmm6, 32(%rax)
; AVX1-NEXT: vmovaps %xmm1, 80(%rax)
; AVX1-NEXT: vmovaps %xmm5, 64(%rax)
; AVX1-NEXT: vmovaps %ymm10, 128(%rax)
; AVX1-NEXT: vmovaps %ymm9, 160(%rax)
; AVX1-NEXT: vmovaps %ymm8, 96(%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_i64_stride6_vf4:
; AVX2: # %bb.0:
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: vmovaps (%rdi), %ymm8
; AVX2-NEXT: vmovaps (%rsi), %ymm11
; AVX2-NEXT: vmovaps (%rdi), %ymm0
; AVX2-NEXT: vmovaps (%rsi), %ymm1
; AVX2-NEXT: vmovaps (%rdx), %ymm2
; AVX2-NEXT: vmovaps (%rcx), %ymm3
; AVX2-NEXT: vmovaps (%r8), %ymm4
; AVX2-NEXT: vmovaps (%r9), %xmm5
; AVX2-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm6
; AVX2-NEXT: vmovaps (%rcx), %xmm7
; AVX2-NEXT: vmovaps (%rdx), %xmm0
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm7[1]
; AVX2-NEXT: vbroadcastsd 8(%r8), %ymm10
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3,4,5],ymm6[6,7]
; AVX2-NEXT: vmovddup {{.*#+}} xmm5 = xmm5[0,0]
; AVX2-NEXT: vmovaps (%rsi), %xmm6
; AVX2-NEXT: vmovaps (%rdi), %xmm1
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm1[1],xmm6[1]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm4[0,1],ymm10[0,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm10[0,1],ymm5[2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm10 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm12 = ymm8[0],ymm11[0],ymm8[2],ymm11[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm12[2,3],ymm10[2,3]
; AVX2-NEXT: vmovaps (%r9), %ymm5
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm6 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm7[2,3],ymm6[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm4[1],ymm5[1],ymm4[3],ymm5[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm2 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX2-NEXT: vbroadcastsd 24(%r8), %ymm3
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm2 = ymm2[2,3],ymm3[2,3]
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3,4,5],mem[6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm8[1],ymm11[1],ymm8[3],ymm11[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm3 = ymm4[2,3],ymm3[2,3]
; AVX2-NEXT: vbroadcastsd 16(%r9), %ymm4
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm1[0],xmm6[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm7[0]
; AVX2-NEXT: vmovaps %xmm0, 16(%rax)
; AVX2-NEXT: vmovaps %xmm1, (%rax)
; AVX2-NEXT: vmovaps %ymm10, 96(%rax)
; AVX2-NEXT: vmovaps %ymm3, 128(%rax)
; AVX2-NEXT: vmovaps %ymm2, 160(%rax)
; AVX2-NEXT: vmovaps %ymm5, 32(%rax)
; AVX2-NEXT: vmovaps %ymm9, 64(%rax)
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm2[2,3],ymm7[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],ymm1[1],ymm0[3],ymm1[3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm4[0],ymm5[0],ymm4[2],ymm5[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm1[2,3],ymm0[2,3]
; AVX2-NEXT: vmovaps (%rcx), %xmm1
; AVX2-NEXT: vmovaps (%rdx), %xmm3
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm4 = xmm3[1],xmm1[1]
; AVX2-NEXT: vmovaps (%r9), %xmm5
; AVX2-NEXT: vmovaps (%r8), %xmm7
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm7[1],xmm5[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm5[0]
; AVX2-NEXT: vmovaps (%rsi), %xmm7
; AVX2-NEXT: vmovaps (%rdi), %xmm2
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm0 = xmm2[1],xmm7[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm2[0],xmm7[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX2-NEXT: vmovaps %xmm1, 16(%rax)
; AVX2-NEXT: vmovaps %xmm2, (%rax)
; AVX2-NEXT: vmovaps %xmm0, 48(%rax)
; AVX2-NEXT: vmovaps %xmm5, 32(%rax)
; AVX2-NEXT: vmovaps %xmm6, 80(%rax)
; AVX2-NEXT: vmovaps %xmm4, 64(%rax)
; AVX2-NEXT: vmovaps %ymm10, 128(%rax)
; AVX2-NEXT: vmovaps %ymm9, 160(%rax)
; AVX2-NEXT: vmovaps %ymm8, 96(%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
@@ -264,22 +256,21 @@ define void @store_i64_stride6_vf4(<4 x i64>* %in.vecptr0, <4 x i64>* %in.vecptr
; AVX512-NEXT: vmovdqa (%rdi), %ymm0
; AVX512-NEXT: vmovdqa (%rdx), %ymm1
; AVX512-NEXT: vmovdqa (%r8), %ymm2
; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-NEXT: vinserti64x4 $1, (%rcx), %zmm1, %zmm1
; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
; AVX512-NEXT: vinserti64x4 $1, (%rsi), %zmm0, %zmm0
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <0,4,8,12,u,u,1,5>
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vinserti64x4 $1, (%r9), %zmm2, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,1,2,3,8,12,6,7]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm4
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,5,u,u,10,14,2,6>
; AVX512-NEXT: vpermi2q %zmm0, %zmm1, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,9,13,4,5,6,7]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm5
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm3 = [11,15,3,7,11,15,3,7]
; AVX512-NEXT: # zmm3 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm1, %zmm0, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [10,14,2,3,4,5,11,15]
; AVX512-NEXT: vpermi2q %zmm2, %zmm3, %zmm0
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <1,5,9,13,u,u,2,6>
; AVX512-NEXT: vpermi2q %zmm2, %zmm1, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm5 = [0,1,2,3,10,14,6,7]
; AVX512-NEXT: vpermi2q %zmm0, %zmm3, %zmm5
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <2,6,11,15,u,u,3,7>
; AVX512-NEXT: vpermi2q %zmm0, %zmm2, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm0 = [0,1,2,3,11,15,6,7]
; AVX512-NEXT: vpermi2q %zmm1, %zmm3, %zmm0
; AVX512-NEXT: vmovdqu64 %zmm0, 128(%rax)
; AVX512-NEXT: vmovdqu64 %zmm5, 64(%rax)
; AVX512-NEXT: vmovdqu64 %zmm4, (%rax)
@@ -417,276 +408,250 @@ define void @store_i64_stride6_vf8(<8 x i64>* %in.vecptr0, <8 x i64>* %in.vecptr
;
; AVX1-LABEL: store_i64_stride6_vf8:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovapd (%rdi), %ymm15
; AVX1-NEXT: vmovapd 32(%rdi), %ymm12
; AVX1-NEXT: vmovapd (%rsi), %ymm9
; AVX1-NEXT: vmovapd 32(%rsi), %ymm13
; AVX1-NEXT: vmovapd (%r8), %ymm10
; AVX1-NEXT: vmovapd 32(%r8), %ymm14
; AVX1-NEXT: vmovapd 32(%r9), %ymm2
; AVX1-NEXT: vmovaps 48(%rsi), %xmm0
; AVX1-NEXT: vmovaps 48(%rdi), %xmm1
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[2],ymm0[2]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5],ymm0[6,7]
; AVX1-NEXT: vbroadcastsd 48(%rcx), %ymm1
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm1[6,7]
; AVX1-NEXT: vmovaps (%rdi), %ymm4
; AVX1-NEXT: vmovaps 32(%rdi), %ymm1
; AVX1-NEXT: vmovaps (%rdx), %ymm5
; AVX1-NEXT: vmovaps 32(%rdx), %ymm2
; AVX1-NEXT: vmovaps (%r8), %ymm3
; AVX1-NEXT: vmovaps 32(%r8), %ymm0
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm0[1],mem[1],ymm0[3],mem[3]
; AVX1-NEXT: vmovaps 48(%rdx), %xmm6
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm0[4,5,6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = mem[0,0]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm1 = ymm1[1],mem[1],ymm1[3],mem[3]
; AVX1-NEXT: vmovaps 48(%r8), %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm1[4,5,6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],mem[0],ymm2[2],mem[2]
; AVX1-NEXT: vmovaps 48(%rdi), %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm0 = ymm6[0,1,2,3],ymm2[4,5,6,7]
; AVX1-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm3[1],mem[1],ymm3[3],mem[3]
; AVX1-NEXT: vmovaps 16(%rdx), %xmm6
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm6 = xmm6[1],mem[1]
; AVX1-NEXT: vblendps {{.*#+}} ymm11 = ymm6[0,1,2,3],ymm3[4,5,6,7]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm4[1],mem[1],ymm4[3],mem[3]
; AVX1-NEXT: vmovaps 16(%r8), %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm12 = ymm6[0,1,2,3],ymm4[4,5,6,7]
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],mem[0],ymm5[2],mem[2]
; AVX1-NEXT: vmovaps 16(%rdi), %xmm6
; AVX1-NEXT: vunpcklpd {{.*#+}} xmm6 = xmm6[0],mem[0]
; AVX1-NEXT: vblendps {{.*#+}} ymm13 = ymm6[0,1,2,3],ymm5[4,5,6,7]
; AVX1-NEXT: vmovaps 32(%rcx), %xmm6
; AVX1-NEXT: vmovaps 32(%rdx), %xmm7
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm7[1],xmm6[1]
; AVX1-NEXT: vmovaps (%r9), %xmm1
; AVX1-NEXT: vmovaps 32(%r9), %xmm2
; AVX1-NEXT: vmovaps 32(%r8), %xmm3
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm15 = xmm3[1],xmm2[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm10 = xmm3[0],xmm2[0]
; AVX1-NEXT: vmovaps (%rsi), %xmm3
; AVX1-NEXT: vmovaps 16(%rsi), %xmm5
; AVX1-NEXT: vmovaps 32(%rsi), %xmm6
; AVX1-NEXT: vmovaps (%rdi), %xmm4
; AVX1-NEXT: vmovaps 16(%rdi), %xmm11
; AVX1-NEXT: vmovaps 32(%rsi), %xmm5
; AVX1-NEXT: vmovaps 32(%rdi), %xmm0
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm0[1],xmm6[1]
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm7
; AVX1-NEXT: vblendpd {{.*#+}} ymm7 = ymm14[0],ymm7[1,2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm7[0],ymm1[1],ymm7[2,3]
; AVX1-NEXT: vmovupd %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm11[0],ymm5[0],ymm11[2],ymm5[2]
; AVX1-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3],mem[4,5],ymm5[6,7]
; AVX1-NEXT: vbroadcastsd 16(%rcx), %ymm7
; AVX1-NEXT: vblendps {{.*#+}} ymm8 = ymm5[0,1,2,3,4,5],ymm7[6,7]
; AVX1-NEXT: vmovddup {{.*#+}} xmm7 = mem[0,0]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm4[1],xmm3[1]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm10[0],ymm1[1,2,3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm11 = ymm1[0],ymm7[1],ymm1[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm1 = ymm2[2,3],ymm13[2,3]
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm12[1],ymm13[1],ymm12[3],ymm13[3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm14[2,3],ymm7[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm12 = ymm7[0],ymm1[0],ymm7[2],ymm1[3]
; AVX1-NEXT: vmovaps 32(%rcx), %xmm14
; AVX1-NEXT: vpermilps {{.*#+}} xmm1 = mem[2,3,2,3]
; AVX1-NEXT: vbroadcastsd 40(%r8), %ymm7
; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm1[0,1,2,3],ymm7[4,5],ymm1[6,7]
; AVX1-NEXT: vinsertf128 $1, 32(%r9), %ymm14, %ymm7
; AVX1-NEXT: vblendps {{.*#+}} ymm13 = ymm1[0,1],ymm7[2,3],ymm1[4,5],ymm7[6,7]
; AVX1-NEXT: vmovapd (%r9), %ymm1
; AVX1-NEXT: vunpckhpd {{.*#+}} ymm7 = ymm15[1],ymm9[1],ymm15[3],ymm9[3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm1[2,3],ymm9[2,3]
; AVX1-NEXT: vperm2f128 {{.*#+}} ymm7 = ymm10[2,3],ymm7[2,3]
; AVX1-NEXT: vshufpd {{.*#+}} ymm7 = ymm7[0],ymm9[0],ymm7[2],ymm9[3]
; AVX1-NEXT: vpermilps {{.*#+}} xmm9 = mem[2,3,2,3]
; AVX1-NEXT: vbroadcastsd 8(%r8), %ymm10
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm10[4,5],ymm9[6,7]
; AVX1-NEXT: vmovaps (%rcx), %xmm10
; AVX1-NEXT: vinsertf128 $1, (%r9), %ymm10, %ymm15
; AVX1-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm15[2,3],ymm9[4,5],ymm15[6,7]
; AVX1-NEXT: vmovapd 48(%rdx), %xmm5
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm5[1],mem[1]
; AVX1-NEXT: vbroadcastsd 56(%r8), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm15[2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm2 = ymm5[0,1,2],ymm2[3]
; AVX1-NEXT: vmovapd 16(%rdx), %xmm5
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm5 = xmm5[1],mem[1]
; AVX1-NEXT: vbroadcastsd 24(%r8), %ymm15
; AVX1-NEXT: vblendpd {{.*#+}} ymm5 = ymm5[0,1],ymm15[2],ymm5[3]
; AVX1-NEXT: vblendpd {{.*#+}} ymm1 = ymm5[0,1,2],ymm1[3]
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm5[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm5[0]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm7[0],xmm6[0]
; AVX1-NEXT: vmovaps (%r8), %xmm6
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm6[1],xmm1[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm1[0]
; AVX1-NEXT: vmovaps (%rdi), %xmm6
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm6[1],xmm3[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm6[0],xmm3[0]
; AVX1-NEXT: vmovaps (%rcx), %xmm6
; AVX1-NEXT: vmovaps (%rdx), %xmm0
; AVX1-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm6[1]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; AVX1-NEXT: vmovaps 32(%rdx), %xmm5
; AVX1-NEXT: vmovlhps {{.*#+}} xmm5 = xmm5[0],xmm14[0]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm3 = xmm4[0],xmm3[0]
; AVX1-NEXT: vmovaps (%rdx), %xmm4
; AVX1-NEXT: vmovlhps {{.*#+}} xmm4 = xmm4[0],xmm10[0]
; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX1-NEXT: vmovaps %xmm4, 16(%rax)
; AVX1-NEXT: vmovaps %xmm0, 16(%rax)
; AVX1-NEXT: vmovaps %xmm3, (%rax)
; AVX1-NEXT: vmovaps %xmm1, 48(%rax)
; AVX1-NEXT: vmovaps %xmm4, 32(%rax)
; AVX1-NEXT: vmovaps %xmm7, 80(%rax)
; AVX1-NEXT: vmovaps %xmm2, 64(%rax)
; AVX1-NEXT: vmovaps %xmm5, 208(%rax)
; AVX1-NEXT: vmovaps %xmm0, 192(%rax)
; AVX1-NEXT: vmovaps %ymm9, 64(%rax)
; AVX1-NEXT: vmovapd %ymm7, 128(%rax)
; AVX1-NEXT: vmovaps %ymm13, 256(%rax)
; AVX1-NEXT: vmovapd %ymm12, 320(%rax)
; AVX1-NEXT: vmovapd %ymm11, 32(%rax)
; AVX1-NEXT: vmovaps %ymm8, 96(%rax)
; AVX1-NEXT: vmovapd %ymm1, 160(%rax)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 224(%rax)
; AVX1-NEXT: vmovaps %xmm8, 192(%rax)
; AVX1-NEXT: vmovaps %xmm9, 240(%rax)
; AVX1-NEXT: vmovaps %xmm10, 224(%rax)
; AVX1-NEXT: vmovaps %xmm15, 272(%rax)
; AVX1-NEXT: vmovaps %xmm14, 256(%rax)
; AVX1-NEXT: vmovaps %ymm13, 96(%rax)
; AVX1-NEXT: vmovaps %ymm12, 128(%rax)
; AVX1-NEXT: vmovaps %ymm11, 160(%rax)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 288(%rax)
; AVX1-NEXT: vmovapd %ymm2, 352(%rax)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 320(%rax)
; AVX1-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX1-NEXT: vmovaps %ymm0, 352(%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: store_i64_stride6_vf8:
; AVX2: # %bb.0:
; AVX2-NEXT: pushq %rax
; AVX2-NEXT: vmovaps 32(%rdx), %ymm7
; AVX2-NEXT: vmovaps (%r8), %ymm11
; AVX2-NEXT: vmovaps 32(%r8), %ymm13
; AVX2-NEXT: vmovaps (%r9), %xmm8
; AVX2-NEXT: vmovaps 32(%r9), %xmm0
; AVX2-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm1
; AVX2-NEXT: vmovaps (%rcx), %xmm5
; AVX2-NEXT: vmovaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps 32(%rcx), %xmm15
; AVX2-NEXT: vmovaps (%rdx), %xmm3
; AVX2-NEXT: vmovaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX2-NEXT: vmovaps 32(%rdx), %xmm12
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm12[1],xmm15[1]
; AVX2-NEXT: vbroadcastsd 40(%r8), %ymm6
; AVX2-NEXT: vblendps {{.*#+}} ymm2 = ymm2[0,1,2,3],ymm6[4,5],ymm2[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5],ymm1[6,7]
; AVX2-NEXT: vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovddup {{.*#+}} xmm9 = xmm0[0,0]
; AVX2-NEXT: vmovaps (%rsi), %xmm4
; AVX2-NEXT: vmovaps 32(%rsi), %xmm1
; AVX2-NEXT: vmovaps (%rdi), %xmm6
; AVX2-NEXT: vmovaps 32(%rdi), %xmm2
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm10 = xmm2[1],xmm1[1]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm13[0,1],ymm10[0,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm10[0,1],ymm9[2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm3[1],xmm5[1]
; AVX2-NEXT: vbroadcastsd 8(%r8), %ymm14
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1,2,3],ymm14[4,5],ymm9[6,7]
; AVX2-NEXT: vinsertf128 $1, %xmm8, %ymm0, %ymm14
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm9[0,1,2,3,4,5],ymm14[6,7]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vmovddup {{.*#+}} xmm3 = xmm8[0,0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm14 = xmm6[1],xmm4[1]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm11[0,1],ymm14[0,1]
; AVX2-NEXT: vblendps {{.*#+}} ymm14 = ymm14[0,1],ymm3[2,3],ymm14[4,5,6,7]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm7[1],mem[1],ymm7[3],mem[3]
; AVX2-NEXT: vbroadcastsd 56(%r8), %ymm8
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm5 = ymm3[2,3],ymm8[2,3]
; AVX2-NEXT: vmovaps 32(%rdi), %ymm8
; AVX2-NEXT: vmovaps 32(%rsi), %ymm0
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm10 = ymm8[1],ymm0[1],ymm8[3],ymm0[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm10 = ymm13[2,3],ymm10[2,3]
; AVX2-NEXT: vbroadcastsd 48(%r9), %ymm13
; AVX2-NEXT: vblendps {{.*#+}} ymm10 = ymm10[0,1],ymm13[2,3],ymm10[4,5,6,7]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm8[0],ymm0[0],ymm8[2],ymm0[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm7[2,3]
; AVX2-NEXT: vbroadcastsd 48(%rcx), %ymm7
; AVX2-NEXT: vblendps {{.*#+}} ymm3 = ymm0[0,1,2,3,4,5],ymm7[6,7]
; AVX2-NEXT: vmovaps (%rdi), %ymm5
; AVX2-NEXT: vmovaps 32(%rdi), %ymm2
; AVX2-NEXT: vmovaps (%rsi), %ymm6
; AVX2-NEXT: vmovaps 32(%rsi), %ymm3
; AVX2-NEXT: vmovaps (%rdx), %ymm7
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm8 = ymm7[1],mem[1],ymm7[3],mem[3]
; AVX2-NEXT: vbroadcastsd 24(%r8), %ymm13
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm8 = ymm8[2,3],ymm13[2,3]
; AVX2-NEXT: vmovaps (%rdi), %ymm13
; AVX2-NEXT: vmovaps (%rsi), %ymm0
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm9 = ymm13[1],ymm0[1],ymm13[3],ymm0[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm9 = ymm11[2,3],ymm9[2,3]
; AVX2-NEXT: vbroadcastsd 16(%r9), %ymm11
; AVX2-NEXT: vblendps {{.*#+}} ymm9 = ymm9[0,1],ymm11[2,3],ymm9[4,5,6,7]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm0 = ymm13[0],ymm0[0],ymm13[2],ymm0[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm0[2,3],ymm7[2,3]
; AVX2-NEXT: vbroadcastsd 16(%rcx), %ymm7
; AVX2-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3,4,5],ymm7[6,7]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm2 = xmm12[0],xmm15[0]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm4 = xmm6[0],xmm4[0]
; AVX2-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; AVX2-NEXT: vunpcklpd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6, %xmm6 # 16-byte Folded Reload
; AVX2-NEXT: # xmm6 = xmm6[0],mem[0]
; AVX2-NEXT: vmovaps 32(%rdx), %ymm4
; AVX2-NEXT: vmovaps (%rcx), %ymm8
; AVX2-NEXT: vmovaps 32(%rcx), %ymm9
; AVX2-NEXT: vmovaps (%r8), %ymm10
; AVX2-NEXT: vmovaps 32(%r8), %ymm1
; AVX2-NEXT: vmovaps (%r9), %ymm11
; AVX2-NEXT: vmovaps 32(%r9), %ymm12
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm0 = ymm1[1],ymm12[1],ymm1[3],ymm12[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm4[1],ymm9[1],ymm4[3],ymm9[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm13[2,3],ymm0[2,3]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm13 = ymm2[1],ymm3[1],ymm2[3],ymm3[3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm1 = ymm1[0],ymm12[0],ymm1[2],ymm12[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm1[2,3],ymm13[2,3]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm4 = ymm4[0],ymm9[0],ymm4[2],ymm9[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[2],ymm3[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm0 = ymm2[2,3],ymm4[2,3]
; AVX2-NEXT: vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm3 = ymm10[1],ymm11[1],ymm10[3],ymm11[3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm7[1],ymm8[1],ymm7[3],ymm8[3]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm15 = ymm4[2,3],ymm3[2,3]
; AVX2-NEXT: vunpckhpd {{.*#+}} ymm4 = ymm5[1],ymm6[1],ymm5[3],ymm6[3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm9 = ymm10[0],ymm11[0],ymm10[2],ymm11[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm14 = ymm9[2,3],ymm4[2,3]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm7 = ymm7[0],ymm8[0],ymm7[2],ymm8[2]
; AVX2-NEXT: vunpcklpd {{.*#+}} ymm5 = ymm5[0],ymm6[0],ymm5[2],ymm6[2]
; AVX2-NEXT: vperm2f128 {{.*#+}} ymm13 = ymm5[2,3],ymm7[2,3]
; AVX2-NEXT: vmovaps 32(%rcx), %xmm6
; AVX2-NEXT: vmovaps 32(%rdx), %xmm7
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm12 = xmm7[1],xmm6[1]
; AVX2-NEXT: vmovaps (%r9), %xmm1
; AVX2-NEXT: vmovaps 32(%r9), %xmm2
; AVX2-NEXT: vmovaps (%r8), %xmm3
; AVX2-NEXT: vmovaps 32(%r8), %xmm4
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm11 = xmm4[1],xmm2[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm10 = xmm4[0],xmm2[0]
; AVX2-NEXT: vmovaps 32(%rsi), %xmm4
; AVX2-NEXT: vmovaps 32(%rdi), %xmm0
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm9 = xmm0[1],xmm4[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm8 = xmm0[0],xmm4[0]
; AVX2-NEXT: vmovaps (%rsi), %xmm4
; AVX2-NEXT: vmovlhps {{.*#+}} xmm6 = xmm7[0],xmm6[0]
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm7 = xmm3[1],xmm1[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm5 = xmm3[0],xmm1[0]
; AVX2-NEXT: vmovaps (%rdi), %xmm3
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm1 = xmm3[1],xmm4[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; AVX2-NEXT: vmovaps (%rcx), %xmm4
; AVX2-NEXT: vmovaps (%rdx), %xmm0
; AVX2-NEXT: vunpckhpd {{.*#+}} xmm2 = xmm0[1],xmm4[1]
; AVX2-NEXT: vmovlhps {{.*#+}} xmm0 = xmm0[0],xmm4[0]
; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rax
; AVX2-NEXT: vblendps {{.*#+}} ymm5 = ymm5[0,1,2,3,4,5],mem[6,7]
; AVX2-NEXT: vblendps {{.*#+}} ymm7 = ymm8[0,1,2,3,4,5],mem[6,7]
; AVX2-NEXT: vmovaps %xmm6, 16(%rax)
; AVX2-NEXT: vmovaps %xmm4, (%rax)
; AVX2-NEXT: vmovaps %xmm2, 208(%rax)
; AVX2-NEXT: vmovaps %xmm1, 192(%rax)
; AVX2-NEXT: vmovaps %ymm0, 96(%rax)
; AVX2-NEXT: vmovaps %ymm9, 128(%rax)
; AVX2-NEXT: vmovaps %ymm7, 160(%rax)
; AVX2-NEXT: vmovaps %ymm3, 288(%rax)
; AVX2-NEXT: vmovaps %ymm10, 320(%rax)
; AVX2-NEXT: vmovaps %ymm5, 352(%rax)
; AVX2-NEXT: vmovaps %ymm14, 32(%rax)
; AVX2-NEXT: vmovaps %xmm0, 16(%rax)
; AVX2-NEXT: vmovaps %xmm3, (%rax)
; AVX2-NEXT: vmovaps %xmm1, 48(%rax)
; AVX2-NEXT: vmovaps %xmm5, 32(%rax)
; AVX2-NEXT: vmovaps %xmm7, 80(%rax)
; AVX2-NEXT: vmovaps %xmm2, 64(%rax)
; AVX2-NEXT: vmovaps %xmm6, 208(%rax)
; AVX2-NEXT: vmovaps %xmm8, 192(%rax)
; AVX2-NEXT: vmovaps %xmm9, 240(%rax)
; AVX2-NEXT: vmovaps %xmm10, 224(%rax)
; AVX2-NEXT: vmovaps %xmm11, 272(%rax)
; AVX2-NEXT: vmovaps %xmm12, 256(%rax)
; AVX2-NEXT: vmovaps %ymm13, 96(%rax)
; AVX2-NEXT: vmovaps %ymm14, 128(%rax)
; AVX2-NEXT: vmovaps %ymm15, 160(%rax)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 64(%rax)
; AVX2-NEXT: vmovaps %ymm0, 288(%rax)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 224(%rax)
; AVX2-NEXT: vmovaps %ymm0, 320(%rax)
; AVX2-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; AVX2-NEXT: vmovaps %ymm0, 256(%rax)
; AVX2-NEXT: popq %rax
; AVX2-NEXT: vmovaps %ymm0, 352(%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: store_i64_stride6_vf8:
; AVX512: # %bb.0:
; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r10
; AVX512-NEXT: vmovdqu64 (%rdi), %zmm4
; AVX512-NEXT: vmovdqu64 (%rdi), %zmm5
; AVX512-NEXT: vmovdqu64 (%rsi), %zmm6
; AVX512-NEXT: vmovdqu64 (%rdx), %zmm2
; AVX512-NEXT: vmovdqu64 (%rcx), %zmm3
; AVX512-NEXT: vmovdqu64 (%r8), %zmm10
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [4,12,5,13,4,12,5,13]
; AVX512-NEXT: vmovdqu64 (%rdx), %zmm3
; AVX512-NEXT: vmovdqu64 (%rcx), %zmm4
; AVX512-NEXT: vmovdqu64 (%r8), %zmm8
; AVX512-NEXT: vmovdqu64 (%r9), %zmm2
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm0 = [1,9,2,10,1,9,2,10]
; AVX512-NEXT: # zmm0 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm6, %zmm4, %zmm0
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,4,12>
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm5
; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm0
; AVX512-NEXT: vmovdqa (%r8), %xmm7
; AVX512-NEXT: vpunpckhqdq {{.*#+}} xmm7 = xmm7[1],mem[1]
; AVX512-NEXT: vinserti128 $1, %xmm7, %ymm0, %ymm7
; AVX512-NEXT: movb $12, %al
; AVX512-NEXT: kmovd %eax, %k1
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm0 {%k1}
; AVX512-NEXT: movb $16, %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: vmovdqa64 %zmm10, %zmm0 {%k2}
; AVX512-NEXT: vmovdqu64 (%r9), %zmm5
; AVX512-NEXT: vinserti64x4 $0, %ymm7, %zmm0, %zmm0 {%k1}
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [2,10,2,10,2,10,2,10]
; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm6, %zmm4, %zmm7
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm8 = [1,9,2,10,1,9,2,10]
; AVX512-NEXT: # zmm8 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm8
; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm7
; AVX512-NEXT: movb $48, %al
; AVX512-NEXT: kmovd %eax, %k2
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm8 {%k2}
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,9,u,4,5,6,7>
; AVX512-NEXT: vpermi2q %zmm10, %zmm8, %zmm7
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm8 = [0,1,2,9,4,5,6,7]
; AVX512-NEXT: vpermi2q %zmm5, %zmm7, %zmm8
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm7 = [6,14,6,14,6,14,6,14]
; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm6, %zmm4, %zmm7
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [5,13,6,14,5,13,6,14]
; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm9
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm9 {%k2}
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = <0,1,13,u,4,5,6,7>
; AVX512-NEXT: vpermi2q %zmm10, %zmm9, %zmm7
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm9 = [0,1,2,13,4,5,6,7]
; AVX512-NEXT: vpermi2q %zmm5, %zmm7, %zmm9
; AVX512-NEXT: vmovdqa64 %zmm7, %zmm0 {%k2}
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm7 = [0,8,1,9,0,8,1,9]
; AVX512-NEXT: # zmm7 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm6, %zmm4, %zmm7
; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm7
; AVX512-NEXT: vmovdqa (%rdx), %xmm1
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm1
; AVX512-NEXT: vinserti64x4 $0, %ymm1, %zmm0, %zmm7 {%k1}
; AVX512-NEXT: vinserti32x4 $2, (%r8), %zmm7, %zmm1
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,1,2,3,4,8,6,7]
; AVX512-NEXT: vpermi2q %zmm5, %zmm1, %zmm7
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [7,15,7,15,7,15,7,15]
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm1 = [0,8,0,8,0,8,0,8]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm1
; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm1
; AVX512-NEXT: vmovdqa64 %zmm1, %zmm7 {%k2}
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm1 = [2,10,3,11,2,10,3,11]
; AVX512-NEXT: # zmm1 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm1
; AVX512-NEXT: vmovdqa (%rdi), %ymm9
; AVX512-NEXT: vpunpckhqdq {{.*#+}} ymm9 = ymm9[1],mem[1],ymm9[3],mem[3]
; AVX512-NEXT: vinserti64x4 $0, %ymm9, %zmm0, %zmm1 {%k1}
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm9 = [3,11,3,11,3,11,3,11]
; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm9
; AVX512-NEXT: vmovdqa64 %zmm9, %zmm1 {%k2}
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm9 = [7,15,7,15,7,15,7,15]
; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm9
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm10 = [6,14,7,15,6,14,7,15]
; AVX512-NEXT: # zmm10 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm10
; AVX512-NEXT: vmovdqa {{.*#+}} ymm11 = <u,u,7,15>
; AVX512-NEXT: vpermi2q %zmm6, %zmm4, %zmm11
; AVX512-NEXT: vshufi64x2 {{.*#+}} zmm1 = zmm11[0,1,2,3],zmm1[4,5,6,7]
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm4 = <14,u,2,3,4,5,15,u>
; AVX512-NEXT: vpermi2q %zmm10, %zmm1, %zmm4
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm1 = [0,14,2,3,4,5,6,15]
; AVX512-NEXT: vpermi2q %zmm5, %zmm4, %zmm1
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm4 = [3,11,3,11,3,11,3,11]
; AVX512-NEXT: # zmm4 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm3, %zmm2, %zmm4
; AVX512-NEXT: vmovdqa (%rdi), %ymm2
; AVX512-NEXT: vpunpckhqdq {{.*#+}} ymm2 = ymm2[1],mem[1],ymm2[3],mem[3]
; AVX512-NEXT: vinserti64x4 $0, %ymm2, %zmm4, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = <10,u,2,3,4,5,11,u>
; AVX512-NEXT: vpermi2q %zmm10, %zmm2, %zmm3
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,10,2,3,4,5,6,11]
; AVX512-NEXT: vpermi2q %zmm5, %zmm3, %zmm2
; AVX512-NEXT: vmovdqa64 {{.*#+}} zmm3 = [0,1,2,3,4,12,6,7]
; AVX512-NEXT: vpermi2q %zmm5, %zmm0, %zmm3
; AVX512-NEXT: vmovdqu64 %zmm3, 192(%r10)
; AVX512-NEXT: vmovdqu64 %zmm2, 128(%r10)
; AVX512-NEXT: vmovdqu64 %zmm1, 320(%r10)
; AVX512-NEXT: vmovdqu64 %zmm9, 256(%r10)
; AVX512-NEXT: vmovdqu64 %zmm8, 64(%r10)
; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm11
; AVX512-NEXT: vmovdqa64 %zmm11, %zmm10 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm9, %zmm10 {%k2}
; AVX512-NEXT: vbroadcasti32x4 {{.*#+}} zmm9 = [6,14,6,14,6,14,6,14]
; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm9
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm11 = [5,13,6,14,5,13,6,14]
; AVX512-NEXT: # zmm11 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm11
; AVX512-NEXT: vmovdqa {{.*#+}} ymm12 = <u,u,5,13>
; AVX512-NEXT: vpermi2q %zmm2, %zmm8, %zmm12
; AVX512-NEXT: vmovdqa64 %zmm12, %zmm11 {%k1}
; AVX512-NEXT: vmovdqa64 %zmm9, %zmm11 {%k2}
; AVX512-NEXT: vbroadcasti64x4 {{.*#+}} zmm9 = [4,12,5,13,4,12,5,13]
; AVX512-NEXT: # zmm9 = mem[0,1,2,3,0,1,2,3]
; AVX512-NEXT: vpermi2q %zmm6, %zmm5, %zmm9
; AVX512-NEXT: vmovdqa {{.*#+}} ymm5 = <u,u,4,12>
; AVX512-NEXT: vpermi2q %zmm4, %zmm3, %zmm5
; AVX512-NEXT: vmovdqa64 %zmm5, %zmm9 {%k1}
; AVX512-NEXT: vpunpcklqdq {{.*#+}} zmm9 {%k2} = zmm8[0],zmm2[0],zmm8[2],zmm2[2],zmm8[4],zmm2[4],zmm8[6],zmm2[6]
; AVX512-NEXT: vmovdqu64 %zmm11, 256(%r10)
; AVX512-NEXT: vmovdqu64 %zmm10, 320(%r10)
; AVX512-NEXT: vmovdqu64 %zmm1, 128(%r10)
; AVX512-NEXT: vmovdqu64 %zmm9, 192(%r10)
; AVX512-NEXT: vmovdqu64 %zmm7, (%r10)
; AVX512-NEXT: vmovdqu64 %zmm0, 64(%r10)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
%in.vec0 = load <8 x i64>, <8 x i64>* %in.vecptr0, align 32

@@ -227,64 +227,64 @@ define void @store_i8_stride3_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr1, <16 x i8>* %in.vecptr2, <48 x i8>* %out.vec) nounwind {
; SSE-LABEL: store_i8_stride3_vf16:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm2
; SSE-NEXT: movdqa (%rsi), %xmm4
; SSE-NEXT: movdqa (%rdx), %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,5,5,6]
; SSE-NEXT: movdqa (%rdi), %xmm5
; SSE-NEXT: movdqa (%rsi), %xmm1
; SSE-NEXT: movdqa (%rdx), %xmm8
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: por %xmm5, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
; SSE-NEXT: pand %xmm5, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa %xmm5, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; SSE-NEXT: por %xmm4, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm7, %xmm4
; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,1,1,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE-NEXT: pand %xmm5, %xmm7
; SSE-NEXT: pandn %xmm6, %xmm5
; SSE-NEXT: por %xmm7, %xmm5
; SSE-NEXT: pand %xmm0, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6]
; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: pandn %xmm6, %xmm7
; SSE-NEXT: por %xmm5, %xmm7
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: pandn %xmm4, %xmm0
; SSE-NEXT: por %xmm2, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,6,6]
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: por %xmm7, %xmm6
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm8[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,6,5,7,7]
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: pandn %xmm5, %xmm0
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, 32(%rcx)
; SSE-NEXT: movdqa %xmm7, (%rcx)
; SSE-NEXT: movdqa %xmm3, 16(%rcx)
; SSE-NEXT: movdqa %xmm3, (%rcx)
; SSE-NEXT: movdqa %xmm4, 16(%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i8_stride3_vf16:
@@ -355,114 +355,114 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
; SSE-NEXT: movdqa 16(%rsi), %xmm7
; SSE-NEXT: movdqa (%rdx), %xmm8
; SSE-NEXT: movdqa 16(%rdx), %xmm10
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm10[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa %xmm5, %xmm12
; SSE-NEXT: pandn %xmm2, %xmm12
; SSE-NEXT: por %xmm1, %xmm12
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm7[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm3[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa %xmm4, %xmm12
; SSE-NEXT: pandn %xmm6, %xmm12
; SSE-NEXT: por %xmm2, %xmm12
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa %xmm5, %xmm6
; SSE-NEXT: movdqa %xmm0, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
; SSE-NEXT: por %xmm1, %xmm6
; SSE-NEXT: movdqa %xmm7, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm7[8],xmm1[9],xmm7[9],xmm1[10],xmm7[10],xmm1[11],xmm7[11],xmm1[12],xmm7[12],xmm1[13],xmm7[13],xmm1[14],xmm7[14],xmm1[15],xmm7[15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm13[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: por %xmm6, %xmm2
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm9[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm6[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa %xmm4, %xmm6
; SSE-NEXT: pandn %xmm1, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0]
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,7,7,7]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,6,5,7,7]
; SSE-NEXT: movdqa %xmm2, %xmm1
; SSE-NEXT: pandn %xmm4, %xmm1
; SSE-NEXT: por %xmm3, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,6,5,7,7]
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,1,1,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,2,4,5,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm11[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,6]
; SSE-NEXT: pand %xmm5, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm10[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: pandn %xmm4, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,1,1,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,2,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm1, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm1[0,1,2,3,5,5,6,6]
; SSE-NEXT: pand %xmm0, %xmm7
; SSE-NEXT: por %xmm3, %xmm7
; SSE-NEXT: movdqa %xmm13, %xmm3
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm13[8],xmm3[9],xmm13[9],xmm3[10],xmm13[10],xmm3[11],xmm13[11],xmm3[12],xmm13[12],xmm3[13],xmm13[13],xmm3[14],xmm13[14],xmm3[15],xmm13[15]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: pand %xmm4, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,5,5,6,6]
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: por %xmm7, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,7,7,7,7]
; SSE-NEXT: movdqa %xmm0, %xmm7
; SSE-NEXT: pandn %xmm3, %xmm7
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,6,5,7,7]
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm8[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,6,5,7,7]
; SSE-NEXT: pandn %xmm4, %xmm2
; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: por %xmm7, %xmm3
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm13, %xmm7
; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pandn %xmm7, %xmm5
; SSE-NEXT: por %xmm3, %xmm5
; SSE-NEXT: punpcklbw {{.*#+}} xmm13 = xmm13[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm13[0,1,1,2]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,1,2,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm9[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,6,6]
; SSE-NEXT: pand %xmm5, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: por %xmm4, %xmm5
; SSE-NEXT: pand %xmm0, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm9[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,2,1,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE-NEXT: pand %xmm0, %xmm7
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: por %xmm7, %xmm0
; SSE-NEXT: pand %xmm4, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,6,6]
; SSE-NEXT: pandn %xmm3, %xmm0
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm0, (%rcx)
; SSE-NEXT: movdqa %xmm2, 32(%rcx)
; SSE-NEXT: movdqa %xmm7, 48(%rcx)
; SSE-NEXT: movdqa %xmm1, 80(%rcx)
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: por %xmm0, %xmm4
; SSE-NEXT: movdqa %xmm4, (%rcx)
; SSE-NEXT: movdqa %xmm5, 32(%rcx)
; SSE-NEXT: movdqa %xmm1, 48(%rcx)
; SSE-NEXT: movdqa %xmm2, 80(%rcx)
; SSE-NEXT: movdqa %xmm6, 16(%rcx)
; SSE-NEXT: movdqa %xmm12, 64(%rcx)
; SSE-NEXT: retq

@@ -139,15 +139,41 @@ define void @store_i8_stride4_vf8(<8 x i8>* %in.vecptr0, <8 x i8>* %in.vecptr1,
; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT: movq {{.*#+}} xmm3 = mem[0],zero
; SSE-NEXT: movq {{.*#+}} xmm4 = mem[0],zero
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; SSE-NEXT: movdqa %xmm3, %xmm2
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm2[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm2[0,0,1,1]
; SSE-NEXT: packuswb %xmm6, %xmm7
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,0,65535,0,65535,0,65535,0]
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pandn %xmm7, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3],xmm1[4],xmm5[4],xmm1[5],xmm5[5],xmm1[6],xmm5[6],xmm1[7],xmm5[7]
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm1[0],xmm5[1],xmm1[1],xmm5[2],xmm1[2],xmm5[3],xmm1[3]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm5[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,1,3]
; SSE-NEXT: packuswb %xmm7, %xmm5
; SSE-NEXT: pand %xmm2, %xmm5
; SSE-NEXT: por %xmm6, %xmm5
; SSE-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm3[2,2,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,1]
; SSE-NEXT: packuswb %xmm4, %xmm3
; SSE-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; SSE-NEXT: movdqa %xmm0, 16(%r8)
; SSE-NEXT: movdqa %xmm2, (%r8)
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,1,3,3]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,1,3]
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pandn %xmm3, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, 16(%r8)
; SSE-NEXT: movdqa %xmm5, (%r8)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i8_stride4_vf8:

File diff suppressed because it is too large

@@ -108,12 +108,12 @@ define void @PR46178(i16* %0) {
; X86-NEXT: vmovdqu (%eax), %ymm1
; X86-NEXT: vpmovqw %ymm0, %xmm0
; X86-NEXT: vpmovqw %ymm1, %xmm1
; X86-NEXT: vpsllw $8, %xmm0, %xmm0
; X86-NEXT: vpsraw $8, %xmm0, %xmm0
; X86-NEXT: vpsllw $8, %xmm1, %xmm1
; X86-NEXT: vpsraw $8, %xmm1, %xmm1
; X86-NEXT: vpunpcklqdq {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[2]
; X86-NEXT: vmovdqu %ymm0, (%eax)
; X86-NEXT: vpsllw $8, %xmm0, %xmm0
; X86-NEXT: vpsraw $8, %xmm0, %xmm0
; X86-NEXT: vshufpd {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[2],ymm1[3]
; X86-NEXT: vmovupd %ymm0, (%eax)
; X86-NEXT: vzeroupper
; X86-NEXT: retl
;
@@ -126,9 +126,8 @@ define void @PR46178(i16* %0) {
; X64-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: vpsllw $8, %ymm0, %ymm0
; X64-NEXT: vpsraw $8, %ymm0, %ymm0
; X64-NEXT: vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5,6,7]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,1]
; X64-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; X64-NEXT: vmovdqa %xmm0, %xmm0
; X64-NEXT: vmovdqu %ymm0, (%rdi)
; X64-NEXT: vzeroupper
; X64-NEXT: retq

@@ -3219,74 +3219,106 @@ define void @PR43024() {
define void @PR45604(<32 x i16>* %dst, <8 x i16>* %src) {
; SSE2-LABEL: PR45604:
; SSE2: # %bb.0:
; SSE2-NEXT: movdqa (%rsi), %xmm0
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [0,65535,65535,65535,0,65535,65535,65535]
; SSE2-NEXT: movdqa %xmm2, %xmm3
; SSE2-NEXT: pandn %xmm1, %xmm3
; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [0,0,0,0,11,0,0,0,0,0,0,0,11,0,0,0]
; SSE2-NEXT: por %xmm1, %xmm3
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[1,1,1,1]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE2-NEXT: movdqa %xmm2, %xmm5
; SSE2-NEXT: pandn %xmm4, %xmm5
; SSE2-NEXT: por %xmm1, %xmm5
; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,2,2,2]
; SSE2-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE2-NEXT: movdqa %xmm2, %xmm6
; SSE2-NEXT: pandn %xmm4, %xmm6
; SSE2-NEXT: por %xmm1, %xmm6
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,3,3]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE2-NEXT: pandn %xmm0, %xmm2
; SSE2-NEXT: por %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, 48(%rdi)
; SSE2-NEXT: movdqa %xmm6, 32(%rdi)
; SSE2-NEXT: movdqa %xmm5, 16(%rdi)
; SSE2-NEXT: movdqa %xmm3, (%rdi)
; SSE2-NEXT: movdqa (%rsi), %xmm1
; SSE2-NEXT: movd %xmm1, %eax
; SSE2-NEXT: movzwl %ax, %eax
; SSE2-NEXT: movd %eax, %xmm0
; SSE2-NEXT: movl $11, %eax
; SSE2-NEXT: pinsrw $2, %eax, %xmm0
; SSE2-NEXT: pextrw $1, %xmm1, %ecx
; SSE2-NEXT: pinsrw $4, %ecx, %xmm0
; SSE2-NEXT: pinsrw $6, %eax, %xmm0
; SSE2-NEXT: pextrw $2, %xmm1, %ecx
; SSE2-NEXT: movd %ecx, %xmm2
; SSE2-NEXT: pinsrw $2, %eax, %xmm2
; SSE2-NEXT: pextrw $3, %xmm1, %ecx
; SSE2-NEXT: pinsrw $4, %ecx, %xmm2
; SSE2-NEXT: pinsrw $6, %eax, %xmm2
; SSE2-NEXT: pextrw $4, %xmm1, %ecx
; SSE2-NEXT: movd %ecx, %xmm3
; SSE2-NEXT: pinsrw $2, %eax, %xmm3
; SSE2-NEXT: pextrw $5, %xmm1, %ecx
; SSE2-NEXT: pinsrw $4, %ecx, %xmm3
; SSE2-NEXT: pinsrw $6, %eax, %xmm3
; SSE2-NEXT: pextrw $6, %xmm1, %ecx
; SSE2-NEXT: movd %ecx, %xmm4
; SSE2-NEXT: pinsrw $2, %eax, %xmm4
; SSE2-NEXT: pextrw $7, %xmm1, %ecx
; SSE2-NEXT: pinsrw $4, %ecx, %xmm4
; SSE2-NEXT: pinsrw $6, %eax, %xmm4
; SSE2-NEXT: movdqa %xmm4, 48(%rdi)
; SSE2-NEXT: movdqa %xmm3, 32(%rdi)
; SSE2-NEXT: movdqa %xmm2, 16(%rdi)
; SSE2-NEXT: movdqa %xmm0, (%rdi)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: PR45604:
; SSSE3: # %bb.0:
; SSSE3-NEXT: movdqa (%rsi), %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: pshufb {{.*#+}} xmm1 = xmm1[0,1],zero,zero,zero,zero,zero,zero,xmm1[2,3],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,0,0,0,11,0,0,0,0,0,0,0,11,0,0,0]
; SSSE3-NEXT: movdqa %xmm0, %xmm3
; SSSE3-NEXT: pshufb {{.*#+}} xmm3 = xmm3[4,5],zero,zero,zero,zero,zero,zero,xmm3[6,7],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: por %xmm2, %xmm1
; SSSE3-NEXT: por %xmm2, %xmm3
; SSSE3-NEXT: movdqa %xmm0, %xmm4
; SSSE3-NEXT: pshufb {{.*#+}} xmm4 = xmm4[8,9],zero,zero,zero,zero,zero,zero,xmm4[10,11],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: por %xmm2, %xmm4
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[12,13],zero,zero,zero,zero,zero,zero,xmm0[14,15],zero,zero,zero,zero,zero,zero
; SSSE3-NEXT: por %xmm2, %xmm0
; SSSE3-NEXT: movdqa %xmm0, 48(%rdi)
; SSSE3-NEXT: movdqa %xmm4, 32(%rdi)
; SSSE3-NEXT: movdqa %xmm3, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm1, (%rdi)
; SSSE3-NEXT: movdqa (%rsi), %xmm1
; SSSE3-NEXT: movd %xmm1, %eax
; SSSE3-NEXT: movzwl %ax, %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movl $11, %eax
; SSSE3-NEXT: pinsrw $2, %eax, %xmm0
; SSSE3-NEXT: pextrw $1, %xmm1, %ecx
; SSSE3-NEXT: pinsrw $4, %ecx, %xmm0
; SSSE3-NEXT: pinsrw $6, %eax, %xmm0
; SSSE3-NEXT: pextrw $2, %xmm1, %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: pinsrw $2, %eax, %xmm2
; SSSE3-NEXT: pextrw $3, %xmm1, %ecx
; SSSE3-NEXT: pinsrw $4, %ecx, %xmm2
; SSSE3-NEXT: pinsrw $6, %eax, %xmm2
; SSSE3-NEXT: pextrw $4, %xmm1, %ecx
; SSSE3-NEXT: movd %ecx, %xmm3
; SSSE3-NEXT: pinsrw $2, %eax, %xmm3
; SSSE3-NEXT: pextrw $5, %xmm1, %ecx
; SSSE3-NEXT: pinsrw $4, %ecx, %xmm3
; SSSE3-NEXT: pinsrw $6, %eax, %xmm3
; SSSE3-NEXT: pextrw $6, %xmm1, %ecx
; SSSE3-NEXT: movd %ecx, %xmm4
; SSSE3-NEXT: pinsrw $2, %eax, %xmm4
; SSSE3-NEXT: pextrw $7, %xmm1, %ecx
; SSSE3-NEXT: pinsrw $4, %ecx, %xmm4
; SSSE3-NEXT: pinsrw $6, %eax, %xmm4
; SSSE3-NEXT: movdqa %xmm4, 48(%rdi)
; SSSE3-NEXT: movdqa %xmm3, 32(%rdi)
; SSSE3-NEXT: movdqa %xmm2, 16(%rdi)
; SSSE3-NEXT: movdqa %xmm0, (%rdi)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: PR45604:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa (%rsi), %xmm0
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <u,0,11,0,u,0,11,0>
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3],xmm1[4],xmm2[5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,3,2,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm3 = xmm3[0],zero,zero,zero,xmm3[1],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm2[1,2,3],xmm3[4],xmm2[5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[3,3,3,3]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0],xmm2[1,2,3],xmm4[4],xmm2[5,6,7]
; SSE41-NEXT: pmovzxwq {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3],xmm0[4],xmm2[5,6,7]
; SSE41-NEXT: movdqa %xmm0, (%rdi)
; SSE41-NEXT: movdqa %xmm4, 48(%rdi)
; SSE41-NEXT: movdqa %xmm3, 32(%rdi)
; SSE41-NEXT: movdqa %xmm1, 16(%rdi)
; SSE41-NEXT: movdqa (%rsi), %xmm1
; SSE41-NEXT: pextrw $2, %xmm1, %eax
; SSE41-NEXT: movd %eax, %xmm0
; SSE41-NEXT: movl $11, %eax
; SSE41-NEXT: pinsrw $2, %eax, %xmm0
; SSE41-NEXT: pextrw $3, %xmm1, %ecx
; SSE41-NEXT: pinsrw $4, %ecx, %xmm0
; SSE41-NEXT: pinsrw $6, %eax, %xmm0
; SSE41-NEXT: pextrw $4, %xmm1, %ecx
; SSE41-NEXT: movd %ecx, %xmm2
; SSE41-NEXT: pinsrw $2, %eax, %xmm2
; SSE41-NEXT: pextrw $5, %xmm1, %ecx
; SSE41-NEXT: pinsrw $4, %ecx, %xmm2
; SSE41-NEXT: pinsrw $6, %eax, %xmm2
; SSE41-NEXT: pextrw $6, %xmm1, %ecx
; SSE41-NEXT: movd %ecx, %xmm3
; SSE41-NEXT: pinsrw $2, %eax, %xmm3
; SSE41-NEXT: pextrw $7, %xmm1, %ecx
; SSE41-NEXT: pinsrw $4, %ecx, %xmm3
; SSE41-NEXT: pinsrw $6, %eax, %xmm3
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3,4,5,6,7]
; SSE41-NEXT: pinsrw $2, %eax, %xmm4
; SSE41-NEXT: pextrw $1, %xmm1, %ecx
; SSE41-NEXT: pinsrw $4, %ecx, %xmm4
; SSE41-NEXT: pinsrw $6, %eax, %xmm4
; SSE41-NEXT: movdqa %xmm4, (%rdi)
; SSE41-NEXT: movdqa %xmm3, 48(%rdi)
; SSE41-NEXT: movdqa %xmm2, 32(%rdi)
; SSE41-NEXT: movdqa %xmm0, 16(%rdi)
; SSE41-NEXT: retq
;
; AVX1-LABEL: PR45604:


@@ -1113,171 +1113,110 @@ ret void
define void @interleaved_store_vf64_i8_stride3(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <192 x i8>* %p) {
; AVX1-LABEL: interleaved_store_vf64_i8_stride3:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $88, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 96
; AVX1-NEXT: vmovups %ymm5, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovups %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; AVX1-NEXT: vmovdqa %ymm3, %ymm11
; AVX1-NEXT: vmovdqa %ymm2, %ymm12
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm10
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm13
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = <u,u,u,u,u,128,128,128,128,128,128,6,7,8,9,10>
; AVX1-NEXT: vpshufb %xmm5, %xmm13, %xmm8
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <u,u,u,u,u,5,6,7,8,9,10,128,128,128,128,128>
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm15
; AVX1-NEXT: vpshufb %xmm2, %xmm15, %xmm6
; AVX1-NEXT: vpor %xmm6, %xmm8, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm5, %xmm1, %xmm9
; AVX1-NEXT: vpshufb %xmm2, %xmm11, %xmm6
; AVX1-NEXT: vpor %xmm6, %xmm9, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm5, %xmm10, %xmm14
; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm6
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm7
; AVX1-NEXT: vpor %xmm7, %xmm14, %xmm3
; AVX1-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [11,12,13,14,15,0,1,2,3,4,5,128,128,128,128,128]
; AVX1-NEXT: vpshufb %xmm7, %xmm10, %xmm10
; AVX1-NEXT: vpshufb %xmm7, %xmm1, %xmm3
; AVX1-NEXT: vpshufb %xmm7, %xmm13, %xmm13
; AVX1-NEXT: vpshufb %xmm7, %xmm0, %xmm4
; AVX1-NEXT: vpshufb %xmm5, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm2, %xmm12, %xmm7
; AVX1-NEXT: vpor %xmm0, %xmm7, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm6[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
; AVX1-NEXT: vpalignr {{.*#+}} xmm12 = xmm7[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5]
; AVX1-NEXT: vextractf128 $1, %ymm3, %xmm10
; AVX1-NEXT: vpalignr {{.*#+}} xmm13 = xmm10[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm11
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm11[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm14 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm6
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm1[8],xmm12[8],xmm1[9],xmm12[9],xmm1[10],xmm12[10],xmm1[11],xmm12[11],xmm1[12],xmm12[12],xmm1[13],xmm12[13],xmm1[14],xmm12[14],xmm1[15],xmm12[15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <u,u,u,u,u,4,6,8,10,12,14,7,9,11,13,15>
; AVX1-NEXT: vpshufb %xmm0, %xmm7, %xmm2
; AVX1-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
; AVX1-NEXT: vmovdqa %ymm1, %ymm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm8 = xmm7[8],xmm6[8],xmm7[9],xmm6[9],xmm7[10],xmm6[10],xmm7[11],xmm6[11],xmm7[12],xmm6[12],xmm7[13],xmm6[13],xmm7[14],xmm6[14],xmm7[15],xmm6[15]
; AVX1-NEXT: vpshufb %xmm0, %xmm8, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm9 = xmm8[8],xmm11[8],xmm8[9],xmm11[9],xmm8[10],xmm11[10],xmm8[11],xmm11[11],xmm8[12],xmm11[12],xmm8[13],xmm11[13],xmm8[14],xmm11[14],xmm8[15],xmm11[15]
; AVX1-NEXT: vpshufb %xmm0, %xmm9, %xmm9
; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm1[8],xmm15[8],xmm1[9],xmm15[9],xmm1[10],xmm15[10],xmm1[11],xmm15[11],xmm1[12],xmm15[12],xmm1[13],xmm15[13],xmm1[14],xmm15[14],xmm1[15],xmm15[15]
; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm0
; AVX1-NEXT: vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1,2,3,4]
; AVX1-NEXT: vpor %xmm5, %xmm13, %xmm5
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4]
; AVX1-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm8[0,1,2,3,4]
; AVX1-NEXT: vpor %xmm5, %xmm3, %xmm5
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4]
; AVX1-NEXT: vmovdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,1,2,3,4]
; AVX1-NEXT: vpor %xmm5, %xmm10, %xmm5
; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vpslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0,1,2,3,4]
; AVX1-NEXT: vpor %xmm6, %xmm4, %xmm6
; AVX1-NEXT: vpalignr {{.*#+}} xmm14 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4]
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpalignr {{.*#+}} xmm13 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; AVX1-NEXT: vpalignr $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm10, %xmm9 # 16-byte Folded Reload
; AVX1-NEXT: # xmm9 = mem[5,6,7,8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4]
; AVX1-NEXT: vpalignr $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm3 # 16-byte Folded Reload
; AVX1-NEXT: # xmm3 = mem[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [5,128,11,6,128,12,7,128,13,8,128,14,9,128,15,10]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm0, %xmm4, %xmm6
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [128,5,128,128,6,128,128,7,128,128,8,128,128,9,128,128]
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm5
; AVX1-NEXT: vpor %xmm5, %xmm6, %xmm11
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm5
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm12[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm12 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm12[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm15[5,6,7,8,9,10,11,12,13,14,15],xmm9[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4]
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm5
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm8[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm13[5,6,7,8,9,10,11,12,13,14,15],xmm8[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm13[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm10 = xmm0[5,6,7,8,9,10,11,12,13,14,15],xmm10[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm8[5,6,7,8,9,10,11,12,13,14,15],xmm5[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm13 = xmm5[5,6,7,8,9,10,11,12,13,14,15],xmm0[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm7[5,6,7,8,9,10,11,12,13,14,15],xmm3[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm9[5,6,7,8,9,10,11,12,13,14,15],xmm15[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm15[5,6,7,8,9,10,11,12,13,14,15],xmm7[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm4[5,6,7,8,9,10,11,12,13,14,15],xmm11[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm12[5,6,7,8,9,10,11,12,13,14,15],xmm6[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm11 = xmm6[5,6,7,8,9,10,11,12,13,14,15],xmm4[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm14[5,6,7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4]
; AVX1-NEXT: vpalignr $5, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm6 # 16-byte Folded Reload
; AVX1-NEXT: # xmm6 = mem[5,6,7,8,9,10,11,12,13,14,15],xmm1[0,1,2,3,4]
; AVX1-NEXT: vpalignr {{.*#+}} xmm1 = xmm1[5,6,7,8,9,10,11,12,13,14,15],xmm14[0,1,2,3,4]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX1-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX1-NEXT: vpshufb %xmm4, %xmm2, %xmm14
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm12
; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm4, %xmm7, %xmm7
; AVX1-NEXT: vpor %xmm7, %xmm5, %xmm10
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm7
; AVX1-NEXT: vpshufb %xmm4, %xmm8, %xmm6
; AVX1-NEXT: vpor %xmm6, %xmm7, %xmm12
; AVX1-NEXT: vmovdqa (%rsp), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX1-NEXT: vpshufb %xmm1, %xmm14, %xmm4
; AVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm1, %xmm15, %xmm7
; AVX1-NEXT: vpshufb %xmm1, %xmm9, %xmm2
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm6
; AVX1-NEXT: vpshufb %xmm1, %xmm13, %xmm8
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm9
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vmovdqu %xmm2, 80(%rdi)
; AVX1-NEXT: vmovdqu %xmm10, 64(%rdi)
; AVX1-NEXT: vmovdqu %xmm11, 16(%rdi)
; AVX1-NEXT: vmovdqu %xmm4, (%rdi)
; AVX1-NEXT: vmovdqu %xmm7, 48(%rdi)
; AVX1-NEXT: vmovdqu %xmm3, 32(%rdi)
; AVX1-NEXT: vmovdqu %xmm1, 176(%rdi)
; AVX1-NEXT: vmovdqu %xmm0, 160(%rdi)
; AVX1-NEXT: vmovdqu %xmm12, 112(%rdi)
; AVX1-NEXT: vmovdqu %xmm6, 96(%rdi)
; AVX1-NEXT: vmovdqu %xmm9, 144(%rdi)
; AVX1-NEXT: vmovdqu %xmm8, 128(%rdi)
; AVX1-NEXT: addq $88, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 8
; AVX1-NEXT: vpshufb %xmm4, %xmm11, %xmm1
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm11
; AVX1-NEXT: vpshufb %xmm4, %xmm9, %xmm9
; AVX1-NEXT: vpshufb %xmm4, %xmm10, %xmm2
; AVX1-NEXT: vpshufb %xmm4, %xmm8, %xmm3
; AVX1-NEXT: vpshufb %xmm4, %xmm13, %xmm4
; AVX1-NEXT: vmovdqu %xmm1, 80(%rdi)
; AVX1-NEXT: vmovdqu %xmm7, 64(%rdi)
; AVX1-NEXT: vmovdqu %xmm6, 16(%rdi)
; AVX1-NEXT: vmovdqu %xmm14, (%rdi)
; AVX1-NEXT: vmovdqu %xmm0, 48(%rdi)
; AVX1-NEXT: vmovdqu %xmm12, 32(%rdi)
; AVX1-NEXT: vmovdqu %xmm4, 176(%rdi)
; AVX1-NEXT: vmovdqu %xmm3, 160(%rdi)
; AVX1-NEXT: vmovdqu %xmm5, 112(%rdi)
; AVX1-NEXT: vmovdqu %xmm11, 96(%rdi)
; AVX1-NEXT: vmovdqu %xmm2, 144(%rdi)
; AVX1-NEXT: vmovdqu %xmm9, 128(%rdi)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_store_vf64_i8_stride3:
; AVX2: # %bb.0:
; AVX2-NEXT: vpalignr {{.*#+}} ymm6 = ymm0[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpslldq {{.*#+}} ymm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[0,1,2,3,4],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm4[16,17,18,19,20]
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX2-NEXT: # ymm8 = mem[0,1,0,1]
; AVX2-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6
; AVX2-NEXT: vpalignr {{.*#+}} ymm7 = ymm1[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpslldq {{.*#+}} ymm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[0,1,2,3,4],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm5[16,17,18,19,20]
; AVX2-NEXT: vpblendvb %ymm8, %ymm7, %ymm9, %ymm7
; AVX2-NEXT: vpslldq {{.*#+}} ymm0 = zero,zero,zero,zero,zero,ymm0[0,1,2,3,4,5,6,7,8,9,10],zero,zero,zero,zero,zero,ymm0[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0,0,0,0,0,0,255,255,255,255,255,255,0,0,0,0,0]
; AVX2-NEXT: # ymm9 = mem[0,1,0,1]
; AVX2-NEXT: vpblendvb %ymm9, %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpslldq {{.*#+}} ymm1 = zero,zero,zero,zero,zero,ymm1[0,1,2,3,4,5,6,7,8,9,10],zero,zero,zero,zero,zero,ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpblendvb %ymm9, %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpsrldq {{.*#+}} ymm10 = ymm4[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm4[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
; AVX2-NEXT: vpblendvb %ymm9, %ymm10, %ymm2, %ymm10
; AVX2-NEXT: vpsrldq {{.*#+}} ymm11 = ymm5[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm5[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
; AVX2-NEXT: vpblendvb %ymm9, %ymm11, %ymm3, %ymm9
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm7[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm7[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm6[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm6[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpsrldq {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm1[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
; AVX2-NEXT: vpslldq {{.*#+}} ymm5 = zero,zero,zero,zero,zero,zero,ymm5[0,1,2,3,4,5,6,7,8,9],zero,zero,zero,zero,zero,zero,ymm5[16,17,18,19,20,21,22,23,24,25]
; AVX2-NEXT: vpblendvb %ymm8, %ymm1, %ymm5, %ymm1
; AVX2-NEXT: vpsrldq {{.*#+}} ymm0 = ymm0[5,6,7,8,9,10,11,12,13,14,15],zero,zero,zero,zero,zero,ymm0[21,22,23,24,25,26,27,28,29,30,31],zero,zero,zero,zero,zero
; AVX2-NEXT: vpslldq {{.*#+}} ymm4 = zero,zero,zero,zero,zero,zero,ymm4[0,1,2,3,4,5,6,7,8,9],zero,zero,zero,zero,zero,zero,ymm4[16,17,18,19,20,21,22,23,24,25]
; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm9[5,6,7,8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4],ymm9[21,22,23,24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm10[5,6,7,8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4],ymm10[21,22,23,24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,5,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20,21]
; AVX2-NEXT: vpalignr {{.*#+}} ymm6 = ymm3[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm7 = ymm2[11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,10,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm8 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm9 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm7[5,6,7,8,9,10,11,12,13,14,15],ymm0[0,1,2,3,4],ymm7[21,22,23,24,25,26,27,28,29,30,31],ymm0[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm6[5,6,7,8,9,10,11,12,13,14,15],ymm1[0,1,2,3,4],ymm6[21,22,23,24,25,26,27,28,29,30,31],ymm1[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm4[5,6,7,8,9,10,11,12,13,14,15],ymm7[0,1,2,3,4],ymm4[21,22,23,24,25,26,27,28,29,30,31],ymm7[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm5[5,6,7,8,9,10,11,12,13,14,15],ymm6[0,1,2,3,4],ymm5[21,22,23,24,25,26,27,28,29,30,31],ymm6[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm9[5,6,7,8,9,10,11,12,13,14,15],ymm3[0,1,2,3,4],ymm9[21,22,23,24,25,26,27,28,29,30,31],ymm3[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm8[5,6,7,8,9,10,11,12,13,14,15],ymm2[0,1,2,3,4],ymm8[21,22,23,24,25,26,27,28,29,30,31],ymm2[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[5,6,7,8,9,10,11,12,13,14,15],ymm5[0,1,2,3,4],ymm1[21,22,23,24,25,26,27,28,29,30,31],ymm5[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[5,6,7,8,9,10,11,12,13,14,15],ymm4[0,1,2,3,4],ymm0[21,22,23,24,25,26,27,28,29,30,31],ymm4[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm5[5,6,7,8,9,10,11,12,13,14,15],ymm9[0,1,2,3,4],ymm5[21,22,23,24,25,26,27,28,29,30,31],ymm9[16,17,18,19,20]
; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm4[5,6,7,8,9,10,11,12,13,14,15],ymm8[0,1,2,3,4],ymm4[21,22,23,24,25,26,27,28,29,30,31],ymm8[16,17,18,19,20]
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm2, %ymm6
; AVX2-NEXT: vmovdqa {{.*#+}} ymm7 = [0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5,0,11,6,1,12,7,2,13,8,3,14,9,4,15,10,5]
; AVX2-NEXT: vpshufb %ymm7, %ymm6, %ymm6
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm5[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm2 = ymm4[0,1,2,3],ymm2[4,5,6,7]
; AVX2-NEXT: vpshufb %ymm7, %ymm2, %ymm2
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm5[2,3]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm0 = ymm0[2,3],ymm4[2,3]
; AVX2-NEXT: vpshufb %ymm7, %ymm0, %ymm0
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm5
; AVX2-NEXT: vpshufb %ymm7, %ymm5, %ymm5
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm4[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm4
; AVX2-NEXT: vpshufb %ymm7, %ymm4, %ymm4
; AVX2-NEXT: vpblendd {{.*#+}} ymm3 = ymm5[0,1,2,3],ymm3[4,5,6,7]
; AVX2-NEXT: vpshufb %ymm7, %ymm3, %ymm3
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm4[2,3]
; AVX2-NEXT: vperm2i128 {{.*#+}} ymm1 = ymm1[2,3],ymm5[2,3]
; AVX2-NEXT: vpshufb %ymm7, %ymm1, %ymm1
; AVX2-NEXT: vmovdqu %ymm3, 128(%rdi)
; AVX2-NEXT: vmovdqu %ymm2, 32(%rdi)
; AVX2-NEXT: vmovdqu %ymm1, 160(%rdi)
; AVX2-NEXT: vmovdqu %ymm3, 128(%rdi)
; AVX2-NEXT: vmovdqu %ymm0, 64(%rdi)
; AVX2-NEXT: vmovdqu %ymm5, 96(%rdi)
; AVX2-NEXT: vmovdqu %ymm2, 32(%rdi)
; AVX2-NEXT: vmovdqu %ymm4, 96(%rdi)
; AVX2-NEXT: vmovdqu %ymm6, (%rdi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -1325,125 +1264,79 @@ ret void
define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
; AVX1-LABEL: interleaved_load_vf64_i8_stride3:
; AVX1: # %bb.0:
; AVX1-NEXT: subq $40, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 48
; AVX1-NEXT: vmovdqu (%rdi), %xmm9
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm11
; AVX1-NEXT: vmovdqu 48(%rdi), %xmm10
; AVX1-NEXT: vmovdqu 64(%rdi), %xmm15
; AVX1-NEXT: vmovdqu 80(%rdi), %xmm14
; AVX1-NEXT: vmovdqu 96(%rdi), %xmm3
; AVX1-NEXT: vmovdqu 112(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 144(%rdi), %xmm6
; AVX1-NEXT: vmovdqu 160(%rdi), %xmm12
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [128,128,128,128,128,0,3,6,9,12,15,2,5,8,11,14]
; AVX1-NEXT: vpshufb %xmm2, %xmm3, %xmm0
; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm2, %xmm6, %xmm13
; AVX1-NEXT: vpshufb %xmm2, %xmm9, %xmm5
; AVX1-NEXT: vpshufb %xmm2, %xmm10, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = <1,4,7,10,13,128,128,128,128,128,128,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm0, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = <128,128,128,128,128,0,3,6,9,12,15,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm7
; AVX1-NEXT: vmovdqa %xmm1, %xmm2
; AVX1-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm0, %xmm6, %xmm6
; AVX1-NEXT: vpshufb %xmm8, %xmm12, %xmm7
; AVX1-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpor %xmm6, %xmm7, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm0, %xmm9, %xmm7
; AVX1-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm8, %xmm11, %xmm3
; AVX1-NEXT: vpor %xmm7, %xmm3, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm0, %xmm10, %xmm1
; AVX1-NEXT: vmovdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm8, %xmm15, %xmm7
; AVX1-NEXT: vpor %xmm1, %xmm7, %xmm1
; AVX1-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpshufb %xmm0, %xmm15, %xmm7
; AVX1-NEXT: vpshufb %xmm8, %xmm14, %xmm6
; AVX1-NEXT: vpor %xmm7, %xmm6, %xmm15
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm7
; AVX1-NEXT: vpshufb %xmm0, %xmm11, %xmm9
; AVX1-NEXT: vpshufb %xmm8, %xmm7, %xmm10
; AVX1-NEXT: vpor %xmm9, %xmm10, %xmm10
; AVX1-NEXT: vmovdqu 176(%rdi), %xmm9
; AVX1-NEXT: vpshufb %xmm0, %xmm12, %xmm1
; AVX1-NEXT: vpshufb %xmm8, %xmm9, %xmm11
; AVX1-NEXT: vpor %xmm1, %xmm11, %xmm11
; AVX1-NEXT: vpshufb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vmovdqu 128(%rdi), %xmm1
; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm3
; AVX1-NEXT: vpor %xmm0, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm0 = [1,4,7,10,13,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT: vpshufb %xmm0, %xmm1, %xmm6
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpor %xmm6, %xmm2, %xmm6
; AVX1-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm2[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpshufb %xmm0, %xmm9, %xmm6
; AVX1-NEXT: vpor %xmm6, %xmm13, %xmm12
; AVX1-NEXT: vpalignr {{.*#+}} xmm13 = xmm13[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpshufb %xmm0, %xmm7, %xmm3
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm11
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpshufb %xmm0, %xmm14, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm4, %xmm10
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm4[11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [0,1,2,3,4,5,6,7,8,9,10,128,128,128,128,128]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14]
; AVX1-NEXT: vpshufb %xmm6, %xmm14, %xmm4
; AVX1-NEXT: vpor %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpaddb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpshufb %xmm6, %xmm7, %xmm4
; AVX1-NEXT: vpor %xmm4, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm3, %xmm4, %xmm4
; AVX1-NEXT: vpshufb %xmm6, %xmm9, %xmm5
; AVX1-NEXT: vpor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpaddb %xmm4, %xmm13, %xmm4
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpshufb %xmm6, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpaddb %xmm1, %xmm8, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [5,6,7,8,9,10,128,128,128,128,128,0,1,2,3,4]
; AVX1-NEXT: vpshufb %xmm3, %xmm10, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128]
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpor %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm0, %xmm5, %xmm0
; AVX1-NEXT: vpshufb %xmm3, %xmm11, %xmm5
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpor %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm2, %xmm5, %xmm2
; AVX1-NEXT: vpshufb %xmm3, %xmm12, %xmm5
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpor %xmm7, %xmm5, %xmm5
; AVX1-NEXT: vpaddb %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vmovdqa (%rsp), %xmm5 # 16-byte Reload
; AVX1-NEXT: vpshufb %xmm6, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vmovdqu (%rdi), %xmm11
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm10
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm8
; AVX1-NEXT: vmovdqu 48(%rdi), %xmm3
; AVX1-NEXT: vmovdqu 64(%rdi), %xmm12
; AVX1-NEXT: vmovdqu 80(%rdi), %xmm9
; AVX1-NEXT: vmovdqu 96(%rdi), %xmm6
; AVX1-NEXT: vmovdqu 112(%rdi), %xmm14
; AVX1-NEXT: vmovdqu 128(%rdi), %xmm13
; AVX1-NEXT: vmovdqu 144(%rdi), %xmm5
; AVX1-NEXT: vmovdqu 160(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 176(%rdi), %xmm15
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
; AVX1-NEXT: vpshufb %xmm4, %xmm6, %xmm6
; AVX1-NEXT: vpshufb %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpshufb %xmm4, %xmm11, %xmm11
; AVX1-NEXT: vpshufb %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpshufb %xmm4, %xmm10, %xmm10
; AVX1-NEXT: vpshufb %xmm4, %xmm12, %xmm12
; AVX1-NEXT: vpshufb %xmm4, %xmm14, %xmm14
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm4, %xmm13, %xmm2
; AVX1-NEXT: vpshufb %xmm4, %xmm15, %xmm0
; AVX1-NEXT: vpshufb %xmm4, %xmm8, %xmm7
; AVX1-NEXT: vpshufb %xmm4, %xmm9, %xmm4
; AVX1-NEXT: vpalignr {{.*#+}} xmm13 = xmm4[11,12,13,14,15],xmm3[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm15 = xmm7[11,12,13,14,15],xmm11[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm0[11,12,13,14,15],xmm5[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm8 = xmm2[11,12,13,14,15],xmm6[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm1[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm6[11,12,13,14,15],xmm14[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm1[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm6, %ymm1
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm12[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm11 = xmm11[11,12,13,14,15],xmm10[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm14[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm11, %ymm14
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm12[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm10[11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm7, %ymm10
; AVX1-NEXT: vbroadcastf128 {{.*#+}} ymm12 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX1-NEXT: # ymm12 = mem[0,1,0,1]
; AVX1-NEXT: vandnps %ymm10, %ymm12, %ymm10
; AVX1-NEXT: vandps %ymm12, %ymm14, %ymm14
; AVX1-NEXT: vorps %ymm10, %ymm14, %ymm10
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm14
; AVX1-NEXT: vandnps %ymm14, %ymm12, %ymm14
; AVX1-NEXT: vandps %ymm1, %ymm12, %ymm1
; AVX1-NEXT: vorps %ymm1, %ymm14, %ymm1
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm13[11,12,13,14,15],xmm4[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[11,12,13,14,15],xmm13[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm12 = xmm15[11,12,13,14,15],xmm7[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm7 = xmm11[11,12,13,14,15],xmm15[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm0 = xmm9[11,12,13,14,15],xmm0[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm5 = xmm5[11,12,13,14,15],xmm9[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm9 = xmm8[11,12,13,14,15],xmm2[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vpalignr {{.*#+}} xmm6 = xmm6[11,12,13,14,15],xmm8[0,1,2,3,4,5,6,7,8,9,10]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm2 = xmm5[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX1-NEXT: vpaddb %xmm0, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm0
; AVX1-NEXT: vpaddb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm3[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX1-NEXT: vpaddb %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpaddb %xmm12, %xmm10, %xmm3
; AVX1-NEXT: vpalignr {{.*#+}} xmm4 = xmm7[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX1-NEXT: vpaddb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
; AVX1-NEXT: vpaddb %xmm1, %xmm9, %xmm1
; AVX1-NEXT: vpalignr {{.*#+}} xmm3 = xmm6[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9]
; AVX1-NEXT: vpaddb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm1, %ymm1
; AVX1-NEXT: addq $40, %rsp
; AVX1-NEXT: .cfi_def_cfa_offset 8
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: interleaved_load_vf64_i8_stride3:
@@ -1454,51 +1347,39 @@ define <64 x i8> @interleaved_load_vf64_i8_stride3(<192 x i8>* %ptr){
; AVX2-NEXT: vmovdqu 96(%rdi), %xmm3
; AVX2-NEXT: vmovdqu 112(%rdi), %xmm4
; AVX2-NEXT: vmovdqu 128(%rdi), %xmm5
; AVX2-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm6
; AVX2-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm0
; AVX2-NEXT: vinserti128 $1, 48(%rdi), %ymm0, %ymm0
; AVX2-NEXT: vinserti128 $1, 64(%rdi), %ymm1, %ymm1
; AVX2-NEXT: vinserti128 $1, 80(%rdi), %ymm2, %ymm2
; AVX2-NEXT: vinserti128 $1, 144(%rdi), %ymm3, %ymm3
; AVX2-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm1
; AVX2-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm4
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
; AVX2-NEXT: # ymm5 = mem[0,1,0,1]
; AVX2-NEXT: vpblendvb %ymm5, %ymm6, %ymm2, %ymm7
; AVX2-NEXT: vmovdqa {{.*#+}} ymm8 = [1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14]
; AVX2-NEXT: vpshufb %ymm8, %ymm7, %ymm7
; AVX2-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm5
; AVX2-NEXT: vpshufb %ymm8, %ymm5, %ymm5
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [255,0,0,255,0,0,255,0,0,255,0,0,255,0,0,255,255,0,0,255,0,0,255,0,0,255,0,0,255,0,0,255]
; AVX2-NEXT: # ymm8 = mem[0,1,0,1]
; AVX2-NEXT: vpblendvb %ymm8, %ymm1, %ymm3, %ymm3
; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm6, %ymm6
; AVX2-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm9
; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = <1,4,7,10,13,0,3,6,9,12,15,u,u,u,u,u,1,4,7,10,13,0,3,6,9,12,15,u,u,u,u,u>
; AVX2-NEXT: vpshufb %ymm10, %ymm9, %ymm9
; AVX2-NEXT: vpblendvb %ymm8, %ymm4, %ymm1, %ymm8
; AVX2-NEXT: vpshufb %ymm10, %ymm8, %ymm8
; AVX2-NEXT: vpalignr {{.*#+}} ymm8 = ymm5[11,12,13,14,15],ymm8[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm8[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm9 = ymm7[11,12,13,14,15],ymm9[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm9[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm10 = [1,4,7,10,13,0,3,6,9,12,15,128,128,128,128,128,17,20,23,26,29,16,19,22,25,28,31,128,128,128,128,128]
; AVX2-NEXT: vpshufb %ymm10, %ymm6, %ymm6
; AVX2-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,128,128,128,128,128,128,128,2,5,8,11,14,128,128,128,128,128,128,128,128,128,128,128,18,21,24,27,30]
; AVX2-NEXT: vpshufb %ymm11, %ymm2, %ymm2
; AVX2-NEXT: vpor %ymm2, %ymm6, %ymm2
; AVX2-NEXT: vpaddb %ymm2, %ymm9, %ymm2
; AVX2-NEXT: vpshufb %ymm10, %ymm3, %ymm3
; AVX2-NEXT: vpshufb %ymm11, %ymm4, %ymm4
; AVX2-NEXT: vpor %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vpaddb %ymm3, %ymm8, %ymm3
; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm7[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = <u,u,u,u,u,u,2,5,8,11,14,u,u,u,u,u,u,u,u,u,u,u,2,5,8,11,14,u,u,u,u,u>
; AVX2-NEXT: vinserti128 $1, 160(%rdi), %ymm4, %ymm4
; AVX2-NEXT: vinserti128 $1, 176(%rdi), %ymm5, %ymm5
; AVX2-NEXT: vmovdqa {{.*#+}} ymm6 = [0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13,0,3,6,9,12,15,2,5,8,11,14,1,4,7,10,13]
; AVX2-NEXT: vpshufb %ymm6, %ymm3, %ymm3
; AVX2-NEXT: vpshufb %ymm6, %ymm0, %ymm0
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255]
; AVX2-NEXT: # ymm7 = mem[0,1,0,1]
; AVX2-NEXT: vpblendvb %ymm7, %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vpaddb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm5[5,6,7,8,9,10,11,12,13,14,15,0,1,2,3,4,21,22,23,24,25,26,27,28,29,30,31,16,17,18,19,20]
; AVX2-NEXT: vpshufb %ymm6, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm7, %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm3, %ymm1, %ymm1
; AVX2-NEXT: vpshufb %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vpshufb %ymm6, %ymm5, %ymm5
; AVX2-NEXT: vpshufb %ymm6, %ymm2, %ymm2
; AVX2-NEXT: vpalignr {{.*#+}} ymm6 = ymm2[11,12,13,14,15],ymm0[0,1,2,3,4,5,6,7,8,9,10],ymm2[27,28,29,30,31],ymm0[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm7 = ymm5[11,12,13,14,15],ymm3[0,1,2,3,4,5,6,7,8,9,10],ymm5[27,28,29,30,31],ymm3[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm3 = ymm3[11,12,13,14,15],ymm4[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm4[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm1[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm2 = ymm4[11,12,13,14,15],ymm5[0,1,2,3,4,5,6,7,8,9,10],ymm4[27,28,29,30,31],ymm5[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm4 = ymm7[11,12,13,14,15],ymm2[0,1,2,3,4,5,6,7,8,9,10],ymm7[27,28,29,30,31],ymm2[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm5 = ymm6[11,12,13,14,15],ymm1[0,1,2,3,4,5,6,7,8,9,10],ymm6[27,28,29,30,31],ymm1[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0,255,255,255,255,255,255,255,255,255,255,255,0,0,0,0,0]
; AVX2-NEXT: # ymm8 = mem[0,1,0,1]
; AVX2-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm1
; AVX2-NEXT: vpaddb %ymm5, %ymm1, %ymm1
; AVX2-NEXT: vpblendvb %ymm8, %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpaddb %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[11,12,13,14,15],ymm6[0,1,2,3,4,5,6,7,8,9,10],ymm0[27,28,29,30,31],ymm6[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm0 = ymm0[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX2-NEXT: vpaddb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm3[11,12,13,14,15],ymm7[0,1,2,3,4,5,6,7,8,9,10],ymm3[27,28,29,30,31],ymm7[16,17,18,19,20,21,22,23,24,25,26]
; AVX2-NEXT: vpalignr {{.*#+}} ymm1 = ymm1[10,11,12,13,14,15,0,1,2,3,4,5,6,7,8,9,26,27,28,29,30,31,16,17,18,19,20,21,22,23,24,25]
; AVX2-NEXT: vpaddb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512-LABEL: interleaved_load_vf64_i8_stride3: