AVX-512: Lowering for 512-bit vector shuffles.

Vector types: <8 x 64>, <16 x 32>, <32 x 16> float and integer.

Differential Revision: http://reviews.llvm.org/D10683

llvm-svn: 246981
Elena Demikhovsky 2015-09-08 06:38:21 +00:00
parent a89dc57b41
commit e88038f235
7 changed files with 691 additions and 574 deletions
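To make the new lowering concrete, here is a standalone sketch (not part of the commit) of what a VPERMT2D-style two-source shuffle computes for v16i32: each mask entry indexes into the concatenation of the two inputs, with only the low log2(32) = 5 bits of the index significant, so values 0..15 read the first source and 16..31 read the second. The helper name and element values below are invented for illustration; the mask is taken from one of the vpermt2d tests added by this patch.

#include <array>
#include <cstdint>
#include <cstdio>

// Illustrative scalar model of a v16i32 two-source shuffle as lowered to
// VPERMT2D: each mask entry indexes into the concatenation {A, B}.
std::array<uint32_t, 16> permt2d(const std::array<uint32_t, 16> &A,
                                 const std::array<uint32_t, 16> &B,
                                 const std::array<int, 16> &Mask) {
  std::array<uint32_t, 16> R{};
  for (int i = 0; i < 16; ++i) {
    int Idx = Mask[i] & 31; // keep only the bits needed to index {A, B}
    R[i] = Idx < 16 ? A[Idx] : B[Idx - 16];
  }
  return R;
}

int main() {
  std::array<uint32_t, 16> A{}, B{};
  for (int i = 0; i < 16; ++i) { A[i] = i; B[i] = 100 + i; }
  // Mask from the vpermt2d test below: 15,31,14,22,13,29,4,28,...
  std::array<int, 16> M{15, 31, 14, 22, 13, 29, 4, 28,
                        11, 27, 10, 26, 9, 25, 8, 24};
  auto R = permt2d(A, B, M);
  for (auto V : R) std::printf("%u ", V); // 15 115 14 106 13 113 4 112 ...
  std::printf("\n");
}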


@@ -503,4 +503,74 @@ void DecodeINSERTQIMask(int Len, int Idx,
ShuffleMask.push_back(SM_SentinelUndef);
}
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask) {
for (int i = 0, e = RawMask.size(); i < e; ++i) {
uint64_t M = RawMask[i];
ShuffleMask.push_back((int)M);
}
}
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask) {
for (int i = 0, e = RawMask.size(); i < e; ++i) {
uint64_t M = RawMask[i];
ShuffleMask.push_back((int)M);
}
}
void DecodeVPERMVMask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
if (MaskTy->isVectorTy()) {
unsigned NumElements = MaskTy->getVectorNumElements();
if (NumElements == VT.getVectorNumElements()) {
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp || (!isa<UndefValue>(COp) && !isa<ConstantInt>(COp))) {
ShuffleMask.clear();
return;
}
if (isa<UndefValue>(COp))
ShuffleMask.push_back(SM_SentinelUndef);
else {
uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
Element &= (1 << NumElements) - 1;
ShuffleMask.push_back(Element);
}
}
}
return;
}
// Scalar value; just broadcast it
if (!isa<ConstantInt>(C))
return;
uint64_t Element = cast<ConstantInt>(C)->getZExtValue();
int NumElements = VT.getVectorNumElements();
Element &= (1 << NumElements) - 1;
for (int i = 0; i < NumElements; ++i)
ShuffleMask.push_back(Element);
}
void DecodeVPERMV3Mask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask) {
Type *MaskTy = C->getType();
unsigned NumElements = MaskTy->getVectorNumElements();
if (NumElements == VT.getVectorNumElements()) {
for (unsigned i = 0; i < NumElements; ++i) {
Constant *COp = C->getAggregateElement(i);
if (!COp) {
ShuffleMask.clear();
return;
}
if (isa<UndefValue>(COp))
ShuffleMask.push_back(SM_SentinelUndef);
else {
uint64_t Element = cast<ConstantInt>(COp)->getZExtValue();
Element &= (1 << NumElements*2) - 1;
ShuffleMask.push_back(Element);
}
}
}
}
} // llvm namespace
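For context, a standalone sketch (illustrative names, not LLVM code) of why the two Constant-based decoders above use different masks: a VPERMV index addresses a single source of NumElements elements, while a VPERMV3 index addresses the concatenation of two sources and may therefore run up to 2 * NumElements - 1. The snippet mirrors the VPERMV3 masking for NumElements = 8 and shows which source each decoded index refers to.

#include <cstdint>
#include <cstdio>

int main() {
  const unsigned NumElements = 8; // e.g. a v8i64 / v8f64 shuffle
  // Raw mask entries as they might appear in a constant pool for a VPERMT2Q.
  const uint64_t Raw[NumElements] = {0, 8, 2, 10, 4, 12, 6, 14};
  for (unsigned i = 0; i < NumElements; ++i) {
    // Mirror of the VPERMV3 masking above: keep enough bits to address both
    // sources (2 * NumElements values).
    uint64_t Element = Raw[i] & ((1u << (NumElements * 2)) - 1);
    if (Element < NumElements)
      std::printf("dst[%u] <- src1[%llu]\n", i, (unsigned long long)Element);
    else
      std::printf("dst[%u] <- src2[%llu]\n", i,
                  (unsigned long long)(Element - NumElements));
  }
}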


@@ -108,6 +108,22 @@ void DecodeEXTRQIMask(int Len, int Idx,
/// \brief Decode a SSE4A INSERTQ instruction as a v16i8 shuffle mask.
void DecodeINSERTQIMask(int Len, int Idx,
SmallVectorImpl<int> &ShuffleMask);
/// \brief Decode a VPERM W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMVMask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask);
/// \brief Decode a VPERM W/D/Q/PS/PD mask from a raw array of constants.
void DecodeVPERMVMask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
/// \brief Decode a VPERMT2 W/D/Q/PS/PD mask from an IR-level vector constant.
void DecodeVPERMV3Mask(const Constant *C, MVT VT,
SmallVectorImpl<int> &ShuffleMask);
/// \brief Decode a VPERMT2 W/D/Q/PS/PD mask from a raw array of constants.
void DecodeVPERMV3Mask(ArrayRef<uint64_t> RawMask,
SmallVectorImpl<int> &ShuffleMask);
} // llvm namespace
#endif


@@ -1590,6 +1590,7 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i8, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v32i16, Custom);
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v32i16, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::ZERO_EXTEND, MVT::v64i8, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v32i1, Custom);
@@ -3794,6 +3795,8 @@ static bool isTargetShuffle(unsigned Opcode) {
case X86ISD::VPERMILPI:
case X86ISD::VPERM2X128:
case X86ISD::VPERMI:
case X86ISD::VPERMV:
case X86ISD::VPERMV3:
return true;
}
}
@@ -4658,6 +4661,122 @@ static bool getTargetShuffleMask(SDNode *N, MVT VT,
case X86ISD::MOVLPS:
// Not yet implemented
return false;
case X86ISD::VPERMV: {
IsUnary = true;
SDValue MaskNode = N->getOperand(0);
while (MaskNode->getOpcode() == ISD::BITCAST)
MaskNode = MaskNode->getOperand(0);
unsigned MaskLoBits = Log2_64(VT.getVectorNumElements());
SmallVector<uint64_t, 32> RawMask;
if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
// If we have a build-vector, then things are easy.
EVT MaskVT = MaskNode.getValueType();
assert(MaskVT.isInteger() &&
MaskVT.getVectorNumElements() == VT.getVectorNumElements());
for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
SDValue Op = MaskNode->getOperand(i);
if (Op->getOpcode() == ISD::UNDEF)
RawMask.push_back((uint64_t)SM_SentinelUndef);
else if (isa<ConstantSDNode>(Op)) {
APInt MaskElement = cast<ConstantSDNode>(Op)->getAPIntValue();
RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
} else
return false;
}
DecodeVPERMVMask(RawMask, Mask);
break;
}
if (MaskNode->getOpcode() == X86ISD::VBROADCAST) {
unsigned NumEltsInMask = MaskNode->getNumOperands();
MaskNode = MaskNode->getOperand(0);
auto *CN = dyn_cast<ConstantSDNode>(MaskNode);
if (CN) {
APInt MaskEltValue = CN->getAPIntValue();
for (unsigned i = 0; i < NumEltsInMask; ++i)
RawMask.push_back(MaskEltValue.getLoBits(MaskLoBits).getZExtValue());
DecodeVPERMVMask(RawMask, Mask);
break;
}
// It may be a scalar load
}
auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
if (!MaskLoad)
return false;
SDValue Ptr = MaskLoad->getBasePtr();
if (Ptr->getOpcode() == X86ISD::Wrapper ||
Ptr->getOpcode() == X86ISD::WrapperRIP)
Ptr = Ptr->getOperand(0);
auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
return false;
auto *C = dyn_cast<Constant>(MaskCP->getConstVal());
if (C) {
DecodeVPERMVMask(C, VT, Mask);
if (Mask.empty())
return false;
break;
}
return false;
}
case X86ISD::VPERMV3: {
IsUnary = false;
SDValue MaskNode = N->getOperand(1);
while (MaskNode->getOpcode() == ISD::BITCAST)
MaskNode = MaskNode->getOperand(1);
if (MaskNode->getOpcode() == ISD::BUILD_VECTOR) {
// If we have a build-vector, then things are easy.
EVT MaskVT = MaskNode.getValueType();
assert(MaskVT.isInteger() &&
MaskVT.getVectorNumElements() == VT.getVectorNumElements());
SmallVector<uint64_t, 32> RawMask;
unsigned MaskLoBits = Log2_64(VT.getVectorNumElements()*2);
for (unsigned i = 0; i < MaskNode->getNumOperands(); ++i) {
SDValue Op = MaskNode->getOperand(i);
if (Op->getOpcode() == ISD::UNDEF)
RawMask.push_back((uint64_t)SM_SentinelUndef);
else {
auto *CN = dyn_cast<ConstantSDNode>(Op.getNode());
if (!CN)
return false;
APInt MaskElement = CN->getAPIntValue();
RawMask.push_back(MaskElement.getLoBits(MaskLoBits).getZExtValue());
}
}
DecodeVPERMV3Mask(RawMask, Mask);
break;
}
auto *MaskLoad = dyn_cast<LoadSDNode>(MaskNode);
if (!MaskLoad)
return false;
SDValue Ptr = MaskLoad->getBasePtr();
if (Ptr->getOpcode() == X86ISD::Wrapper ||
Ptr->getOpcode() == X86ISD::WrapperRIP)
Ptr = Ptr->getOperand(0);
auto *MaskCP = dyn_cast<ConstantPoolSDNode>(Ptr);
if (!MaskCP || MaskCP->isMachineConstantPoolEntry())
return false;
auto *C = dyn_cast<Constant>(MaskCP->getConstVal());
if (C) {
DecodeVPERMV3Mask(C, VT, Mask);
if (Mask.empty())
return false;
break;
}
return false;
}
default: llvm_unreachable("unknown target shuffle node");
}
@@ -10445,6 +10564,73 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
}
}
static SDValue lowerVectorShuffleWithPERMV(SDLoc DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
assert(VT.getScalarSizeInBits() >= 16 && "Unexpected data type for PERMV");
MVT MaskEltVT = MVT::getIntegerVT(VT.getScalarSizeInBits());
MVT MaskVecVT = MVT::getVectorVT(MaskEltVT, VT.getVectorNumElements());
SmallVector<SDValue, 32> VPermMask;
for (unsigned i = 0; i < VT.getVectorNumElements(); ++i)
VPermMask.push_back(Mask[i] < 0 ? DAG.getUNDEF(MaskEltVT) :
DAG.getConstant(Mask[i], DL, MaskEltVT));
SDValue MaskNode = DAG.getNode(ISD::BUILD_VECTOR, DL, MaskVecVT,
VPermMask);
if (isSingleInputShuffleMask(Mask))
return DAG.getNode(X86ISD::VPERMV, DL, VT, MaskNode, V1);
return DAG.getNode(X86ISD::VPERMV3, DL, VT, V1, MaskNode, V2);
}
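isSingleInputShuffleMask is defined earlier in this file; as a rough standalone model of the check (the function name below is invented), a mask uses only the first operand when every defined entry is below the element count, in which case the helper above emits VPERMV rather than VPERMV3.

#include <cstdio>
#include <vector>

// Illustrative stand-in for the single-input test: undef entries are encoded
// as negative values, and any index >= NumElts refers to the second source.
static bool usesOnlyFirstSource(const std::vector<int> &Mask) {
  const int NumElts = int(Mask.size());
  for (int M : Mask)
    if (M >= NumElts)
      return false;
  return true;
}

int main() {
  // {2,5,-1,7,0,5,-1,4} reads only V1 -> VPERMV;
  // {0,8,2,10,4,12,6,14} mixes V1 and V2 -> VPERMV3.
  std::printf("%d %d\n", usesOnlyFirstSource({2, 5, -1, 7, 0, 5, -1, 4}),
              usesOnlyFirstSource({0, 8, 2, 10, 4, 12, 6, 14}));
}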
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
static SDValue lowerVectorShuffleWithUNPCK(SDLoc DL, MVT VT,
ArrayRef<int> Mask, SDValue V1,
SDValue V2, SelectionDAG &DAG) {
int NumElts = VT.getVectorNumElements();
bool Unpckl = true;
bool Unpckh = true;
bool UnpcklSwapped = true;
bool UnpckhSwapped = true;
int NumEltsInLane = 128 / VT.getScalarSizeInBits();
for (int i = 0; i < NumElts ; ++i) {
unsigned LaneStart = (i / NumEltsInLane) * NumEltsInLane;
int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
int HiPos = LoPos + NumEltsInLane / 2;
int LoPosSwapped = (LoPos + NumElts) % (NumElts * 2);
int HiPosSwapped = (HiPos + NumElts) % (NumElts * 2);
if (Mask[i] == -1)
continue;
if (Mask[i] != LoPos)
Unpckl = false;
if (Mask[i] != HiPos)
Unpckh = false;
if (Mask[i] != LoPosSwapped)
UnpcklSwapped = false;
if (Mask[i] != HiPosSwapped)
UnpckhSwapped = false;
if (!Unpckl && !Unpckh && !UnpcklSwapped && !UnpckhSwapped)
return SDValue();
}
if (Unpckl)
return DAG.getNode(X86ISD::UNPCKL, DL, VT, V1, V2);
if (Unpckh)
return DAG.getNode(X86ISD::UNPCKH, DL, VT, V1, V2);
if (UnpcklSwapped)
return DAG.getNode(X86ISD::UNPCKL, DL, VT, V2, V1);
if (UnpckhSwapped)
return DAG.getNode(X86ISD::UNPCKH, DL, VT, V2, V1);
llvm_unreachable("Unexpected result of UNPCK mask analysis");
return SDValue();
}
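To see what the LoPos/HiPos arithmetic above encodes, this standalone sketch (not part of the commit) recomputes the expected unpack positions for 32-bit elements in a 512-bit vector; the output reproduces the hard-coded v16f32/v16i32 UNPCKL/UNPCKH patterns that the hunks below delete.

#include <cstdio>

// Standalone check of the LoPos/HiPos formula above for a v16f32/v16i32
// shuffle: 32-bit elements give 4 elements per 128-bit lane, and the computed
// positions reproduce the explicit UNPCKL/UNPCKH patterns removed below.
int main() {
  const int NumElts = 16;
  const int NumEltsInLane = 128 / 32; // 4
  std::printf("UNPCKL: ");
  for (int i = 0; i < NumElts; ++i) {
    int LaneStart = (i / NumEltsInLane) * NumEltsInLane;
    int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
    std::printf("%d ", LoPos); // 0 16 1 17 4 20 5 21 8 24 9 25 12 28 13 29
  }
  std::printf("\nUNPCKH: ");
  for (int i = 0; i < NumElts; ++i) {
    int LaneStart = (i / NumEltsInLane) * NumEltsInLane;
    int LoPos = (i % NumEltsInLane) / 2 + LaneStart + NumElts * (i % 2);
    std::printf("%d ", LoPos + NumEltsInLane / 2); // 2 18 3 19 6 22 7 23 ...
  }
  std::printf("\n");
}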
/// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
const X86Subtarget *Subtarget,
@@ -10456,15 +10642,12 @@ static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 2, 10, 4, 12, 6, 14}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8f64, V1, V2);
if (isShuffleEquivalent(V1, V2, Mask, {1, 9, 3, 11, 5, 13, 7, 15}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8f64, V1, V2);
SDValue UnpckNode =
lowerVectorShuffleWithUNPCK(DL, MVT::v8f64, Mask, V1, V2, DAG);
if (UnpckNode)
return UnpckNode;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
return lowerVectorShuffleWithPERMV(DL, MVT::v8f64, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
@@ -10478,22 +10661,12 @@ static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Use dedicated unpack instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask,
{// First 128-bit lane.
0, 16, 1, 17, 4, 20, 5, 21,
// Second 128-bit lane.
8, 24, 9, 25, 12, 28, 13, 29}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16f32, V1, V2);
if (isShuffleEquivalent(V1, V2, Mask,
{// First 128-bit lane.
2, 18, 3, 19, 6, 22, 7, 23,
// Second 128-bit lane.
10, 26, 11, 27, 14, 30, 15, 31}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16f32, V1, V2);
SDValue UnpckNode =
lowerVectorShuffleWithUNPCK(DL, MVT::v16f32, Mask, V1, V2, DAG);
if (UnpckNode)
return UnpckNode;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
return lowerVectorShuffleWithPERMV(DL, MVT::v16f32, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
@@ -10507,15 +10680,12 @@ static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
// X86 has dedicated unpack instructions that can handle specific blend
// operations: UNPCKH and UNPCKL.
if (isShuffleEquivalent(V1, V2, Mask, {0, 8, 2, 10, 4, 12, 6, 14}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v8i64, V1, V2);
if (isShuffleEquivalent(V1, V2, Mask, {1, 9, 3, 11, 5, 13, 7, 15}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v8i64, V1, V2);
SDValue UnpckNode =
lowerVectorShuffleWithUNPCK(DL, MVT::v8i64, Mask, V1, V2, DAG);
if (UnpckNode)
return UnpckNode;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
return lowerVectorShuffleWithPERMV(DL, MVT::v8i64, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
@@ -10529,22 +10699,12 @@ static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
ArrayRef<int> Mask = SVOp->getMask();
assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
// Use dedicated unpack instructions for masks that match their pattern.
if (isShuffleEquivalent(V1, V2, Mask,
{// First 128-bit lane.
0, 16, 1, 17, 4, 20, 5, 21,
// Second 128-bit lane.
8, 24, 9, 25, 12, 28, 13, 29}))
return DAG.getNode(X86ISD::UNPCKL, DL, MVT::v16i32, V1, V2);
if (isShuffleEquivalent(V1, V2, Mask,
{// First 128-bit lane.
2, 18, 3, 19, 6, 22, 7, 23,
// Second 128-bit lane.
10, 26, 11, 27, 14, 30, 15, 31}))
return DAG.getNode(X86ISD::UNPCKH, DL, MVT::v16i32, V1, V2);
SDValue UnpckNode =
lowerVectorShuffleWithUNPCK(DL, MVT::v16i32, Mask, V1, V2, DAG);
if (UnpckNode)
return UnpckNode;
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
return lowerVectorShuffleWithPERMV(DL, MVT::v16i32, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
@@ -10559,8 +10719,7 @@ static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
// FIXME: Implement direct support for this type!
return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
return lowerVectorShuffleWithPERMV(DL, MVT::v32i16, Mask, V1, V2, DAG);
}
/// \brief Handle lowering of 64-lane 8-bit integer shuffles.


@@ -1269,26 +1269,37 @@ void X86AsmPrinter::EmitInstruction(const MachineInstr *MI) {
break;
}
// For loads from a constant pool to a vector register, print the constant
// loaded.
case X86::MOVAPDrm:
case X86::VMOVAPDrm:
case X86::VMOVAPDYrm:
case X86::MOVUPDrm:
case X86::VMOVUPDrm:
case X86::VMOVUPDYrm:
case X86::MOVAPSrm:
case X86::VMOVAPSrm:
case X86::VMOVAPSYrm:
case X86::MOVUPSrm:
case X86::VMOVUPSrm:
case X86::VMOVUPSYrm:
case X86::MOVDQArm:
case X86::VMOVDQArm:
case X86::VMOVDQAYrm:
case X86::MOVDQUrm:
case X86::VMOVDQUrm:
case X86::VMOVDQUYrm:
#define MOV_CASE(Prefix, Suffix) \
case X86::Prefix##MOVAPD##Suffix##rm: \
case X86::Prefix##MOVAPS##Suffix##rm: \
case X86::Prefix##MOVUPD##Suffix##rm: \
case X86::Prefix##MOVUPS##Suffix##rm: \
case X86::Prefix##MOVDQA##Suffix##rm: \
case X86::Prefix##MOVDQU##Suffix##rm:
#define MOV_AVX512_CASE(Suffix) \
case X86::VMOVDQA64##Suffix##rm: \
case X86::VMOVDQA32##Suffix##rm: \
case X86::VMOVDQU64##Suffix##rm: \
case X86::VMOVDQU32##Suffix##rm: \
case X86::VMOVDQU16##Suffix##rm: \
case X86::VMOVDQU8##Suffix##rm: \
case X86::VMOVAPS##Suffix##rm: \
case X86::VMOVAPD##Suffix##rm: \
case X86::VMOVUPS##Suffix##rm: \
case X86::VMOVUPD##Suffix##rm:
#define CASE_ALL_MOV_RM() \
MOV_CASE(, ) /* SSE */ \
MOV_CASE(V, ) /* AVX-128 */ \
MOV_CASE(V, Y) /* AVX-256 */ \
MOV_AVX512_CASE(Z) \
MOV_AVX512_CASE(Z256) \
MOV_AVX512_CASE(Z128)
// For loads from a constant pool to a vector register, print the constant
// loaded.
CASE_ALL_MOV_RM()
if (!OutStreamer->isVerboseAsm())
break;
if (MI->getNumOperands() > 4)
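As a minimal, self-contained sketch of the macro pattern introduced above (the enum and helper function are invented for illustration; only the token-pasting shape mirrors the real MOV_CASE/CASE_ALL_MOV_RM macros), one family macro stamps out a group of case labels and a wrapper instantiates it once per encoding prefix and suffix.

#include <cstdio>

// Invented opcode enum standing in for the X86 instruction opcodes.
enum Opcode { MOVAPSrm, VMOVAPSrm, VMOVAPSYrm, VMOVAPSZrm, OTHER };

// One macro produces a family of case labels via token pasting; a wrapper
// instantiates it for each encoding variant, like CASE_ALL_MOV_RM() above.
#define MOV_CASE(Prefix, Suffix) case Prefix##MOVAPS##Suffix##rm:
#define CASE_ALL_MOV_RM() \
  MOV_CASE(, )   /* SSE */     \
  MOV_CASE(V, )  /* AVX-128 */ \
  MOV_CASE(V, Y) /* AVX-256 */ \
  MOV_CASE(V, Z) /* AVX-512 */

bool isVectorLoadFromMemory(Opcode Op) {
  switch (Op) {
  CASE_ALL_MOV_RM()
    return true;
  default:
    return false;
  }
}

int main() {
  std::printf("%d %d\n", isVectorLoadFromMemory(VMOVAPSYrm),
              isVectorLoadFromMemory(OTHER)); // 1 0
}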


@@ -12,6 +12,15 @@ define <16 x float> @shuffle_v16f32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d
ret <16 x float> %shuffle
}
define <16 x float> @shuffle_v16f32_vunpcklps_swap(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_vunpcklps_swap:
; ALL: # BB#0:
; ALL-NEXT: vunpcklps {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; ALL-NEXT: retq
%shuffle = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 16, i32 0, i32 17, i32 1, i32 20, i32 4, i32 21, i32 5, i32 24, i32 8, i32 25, i32 9, i32 28, i32 12, i32 29, i32 13>
ret <16 x float> %shuffle
}
define <16 x i32> @shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_00_10_01_11_04_14_05_15_08_18_09_19_0c_1c_0d_1d:
; ALL: # BB#0:
@@ -38,3 +47,76 @@ define <16 x i32> @shuffle_v16i32_02_12_03_13_06_16_07_17_0a_1a_0b_1b_0e_1e_0f_1
%shuffle = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32><i32 2, i32 18, i32 3, i32 19, i32 6, i32 22, i32 7, i32 23, i32 10, i32 26, i32 11, i32 27, i32 14, i32 30, i32 15, i32 31>
ret <16 x i32> %shuffle
}
define <16 x float> @shuffle_v16f32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01(<16 x float> %a) {
; ALL-LABEL: shuffle_v16f32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1>
; ALL-NEXT: vpermps %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%c = shufflevector <16 x float> %a, <16 x float> undef, <16 x i32> <i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 1>
ret <16 x float> %c
}
define <16 x i32> @shuffle_v16i32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01(<16 x i32> %a) {
; ALL-LABEL: shuffle_v16i32_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1>
; ALL-NEXT: vpermd %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> undef, <16 x i32> <i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 1>
ret <16 x i32> %c
}
define <16 x i32> @shuffle_v16i32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
ret <16 x i32> %c
}
define <16 x float> @shuffle_v16f32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x float> %a, <16 x float> %b) {
; ALL-LABEL: shuffle_v16f32_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2ps %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
%c = shufflevector <16 x float> %a, <16 x float> %b, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
ret <16 x float> %c
}
define <16 x float> @shuffle_v16f32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x float> %a, <16 x float>* %b) {
; ALL-LABEL: shuffle_v16f32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm1 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2ps (%rdi), %zmm1, %zmm0
; ALL-NEXT: retq
%c = load <16 x float>, <16 x float>* %b
%d = shufflevector <16 x float> %a, <16 x float> %c, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
ret <16 x float> %d
}
define <16 x i32> @shuffle_v16i32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18(<16 x i32> %a, <16 x i32>* %b) {
; ALL-LABEL: shuffle_v16i32_load_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm1 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24]
; ALL-NEXT: vpermt2d (%rdi), %zmm1, %zmm0
; ALL-NEXT: retq
%c = load <16 x i32>, <16 x i32>* %b
%d = shufflevector <16 x i32> %a, <16 x i32> %c, <16 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24>
ret <16 x i32> %d
}
define <16 x i32> @shuffle_v16i32_0_1_2_13_u_u_u_u_u_u_u_u_u_u_u_u(<16 x i32> %a, <16 x i32> %b) {
; ALL-LABEL: shuffle_v16i32_0_1_2_13_u_u_u_u_u_u_u_u_u_u_u_u:
; ALL: # BB#0:
; ALL-NEXT: vmovdqa32 {{.*#+}} zmm2 = <0,1,2,19,u,u,u,u,u,u,u,u,u,u,u,u>
; ALL-NEXT: vpermt2d %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
%c = shufflevector <16 x i32> %a, <16 x i32> %b, <16 x i32> <i32 0, i32 1, i32 2, i32 19, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <16 x i32> %c
}


@@ -0,0 +1,43 @@
; RUN: llc < %s -mcpu=x86-64 -mattr=+avx512f -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
target triple = "x86_64-unknown-unknown"
define <32 x i16> @shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f(<32 x i16> %a) {
; ALL-LABEL: shuffle_v32i16_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_01_02_05_u_u_07_u_0a_01_00_05_u_04_07_u_0a_1f:
; ALL: # BB#0:
; ALL-NEXT: vmovdqu16 {{.*#+}} zmm1 = <2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,1,2,5,u,u,7,u,10,1,0,5,u,4,7,u,10,31>
; ALL-NEXT: vpermw %zmm0, %zmm1, %zmm0
; ALL-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> undef, <32 x i32> <i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 1, i32 2, i32 5, i32 undef, i32 undef, i32 7, i32 undef, i32 10, i32 1, i32 0, i32 5, i32 undef, i32 4, i32 7, i32 undef, i32 10, i32 31>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38(<32 x i16> %a, <32 x i16> %b) {
; ALL-LABEL: shuffle_v32i16_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_18_0f_1f_0e_16_0d_1d_04_1e_0b_1b_0a_1a_09_19_08_38:
; ALL: # BB#0:
; ALL-NEXT: vmovdqu16 {{.*#+}} zmm2 = [15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,24,15,31,14,22,13,29,4,28,11,27,10,26,9,25,8,56]
; ALL-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 24, i32 15, i32 31, i32 14, i32 22, i32 13, i32 29, i32 4, i32 28, i32 11, i32 27, i32 10, i32 26, i32 9, i32 25, i32 8, i32 56>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u(<32 x i16> %a, <32 x i16> %b) {
; ALL-LABEL: shuffle_v16i32_0_32_1_33_2_34_3_35_8_40_9_41_u_u_u_u:
; ALL: # BB#0:
; ALL-NEXT: vmovdqu16 {{.*#+}} zmm2 = <0,32,1,33,2,34,3,35,8,40,9,41,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; ALL-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 0, i32 32, i32 1, i32 33, i32 2, i32 34, i32 3, i32 35, i32 8, i32 40, i32 9, i32 41, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <32 x i16> %c
}
define <32 x i16> @shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u(<32 x i16> %a, <32 x i16> %b) {
; ALL-LABEL: shuffle_v16i32_4_36_5_37_6_38_7_39_12_44_13_45_u_u_u_u:
; ALL: # BB#0:
; ALL-NEXT: vmovdqu16 {{.*#+}} zmm2 = <4,36,5,37,6,38,7,39,12,44,13,45,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u>
; ALL-NEXT: vpermt2w %zmm1, %zmm2, %zmm0
; ALL-NEXT: retq
%c = shufflevector <32 x i16> %a, <32 x i16> %b, <32 x i32> <i32 4, i32 36, i32 5, i32 37, i32 6, i32 38, i32 7, i32 39, i32 12, i32 44, i32 13, i32 45, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
ret <32 x i16> %c
}

File diff suppressed because it is too large.