[x86] Add the dispatch skeleton to the new vector shuffle lowering for AVX-512.

There is no interesting logic yet. Everything ends up eventually delegating to the generic code to split the vector and shuffle the halves. Interestingly, that logic does a significantly better job of lowering all of these types than the generic vector expansion code does. Mostly, it lets most of the cases fall back to nice AVX2 code rather than all the way back to SSE code paths.

Step 2 of basic AVX-512 support in the new vector shuffle lowering. Next up will be to incrementally add direct support for the basic instruction set to each type (adding tests first).

llvm-svn: 218585
This commit is contained in:
parent 32a3ebda14
commit c7129276cd
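Before the diff, here is a minimal standalone C++ sketch of the shape the commit message describes: a per-element-type dispatch stub whose lowering, for now, falls back to splitting the wide shuffle into two independent half-width shuffles. This is an illustrative model only, operating on plain integer vectors rather than LLVM's SelectionDAG API; the names splitAndLowerShuffle and lowerV8Shuffle are invented for the example (the real fallback in the patch is splitAndLowerVectorShuffle).

// Illustrative model only, not LLVM code. A "shuffle" of two N-element
// vectors picks element Mask[i] from the concatenation of V1 and V2.
#include <cstdio>
#include <vector>

// Generic fallback: lower an N-wide shuffle as two independent N/2-wide
// shuffles, one per output half (hypothetical stand-in for the real
// splitAndLowerVectorShuffle).
static std::vector<int> splitAndLowerShuffle(const std::vector<int> &V1,
                                             const std::vector<int> &V2,
                                             const std::vector<int> &Mask) {
  const int N = static_cast<int>(V1.size());
  const int Half = N / 2;
  std::vector<int> Result(N);
  for (int HalfIdx = 0; HalfIdx < 2; ++HalfIdx)   // low half, then high half
    for (int i = 0; i < Half; ++i) {
      int M = Mask[HalfIdx * Half + i];
      Result[HalfIdx * Half + i] = M < N ? V1[M] : V2[M - N];
    }
  return Result;
}

// Per-type dispatch stub, mirroring the new lowerV8F64VectorShuffle and
// friends: no interesting logic yet, everything delegates to the split.
static std::vector<int> lowerV8Shuffle(const std::vector<int> &V1,
                                       const std::vector<int> &V2,
                                       const std::vector<int> &Mask) {
  // FIXME (as in the patch): implement direct support for this type!
  return splitAndLowerShuffle(V1, V2, Mask);
}

int main() {
  std::vector<int> V1{0, 1, 2, 3, 4, 5, 6, 7};
  std::vector<int> V2{8, 9, 10, 11, 12, 13, 14, 15};
  std::vector<int> Mask{0, 8, 1, 9, 4, 12, 5, 13}; // unpack-style interleave
  for (int E : lowerV8Shuffle(V1, V2, Mask))
    std::printf("%d ", E);
  std::printf("\n"); // prints: 0 8 1 9 4 12 5 13
}

Compiled with any C++11 compiler, the example prints 0 8 1 9 4 12 5 13: each output half is produced independently, which is the per-half fallback the new 512-bit stubs in the diff below delegate to until direct lowering lands.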
@@ -9875,6 +9875,148 @@ static SDValue lower256BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
   }
 }
 
+/// \brief Handle lowering of 8-lane 64-bit floating point shuffles.
+static SDValue lowerV8F64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                       const X86Subtarget *Subtarget,
+                                       SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  assert(V1.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
+  assert(V2.getSimpleValueType() == MVT::v8f64 && "Bad operand type!");
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+
+  // FIXME: Implement direct support for this type!
+  return splitAndLowerVectorShuffle(DL, MVT::v8f64, V1, V2, Mask, DAG);
+}
+
+/// \brief Handle lowering of 16-lane 32-bit floating point shuffles.
+static SDValue lowerV16F32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                        const X86Subtarget *Subtarget,
+                                        SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  assert(V1.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
+  assert(V2.getSimpleValueType() == MVT::v16f32 && "Bad operand type!");
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
+
+  // FIXME: Implement direct support for this type!
+  return splitAndLowerVectorShuffle(DL, MVT::v16f32, V1, V2, Mask, DAG);
+}
+
+/// \brief Handle lowering of 8-lane 64-bit integer shuffles.
+static SDValue lowerV8I64VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                       const X86Subtarget *Subtarget,
+                                       SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  assert(V1.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
+  assert(V2.getSimpleValueType() == MVT::v8i64 && "Bad operand type!");
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Mask.size() == 8 && "Unexpected mask size for v8 shuffle!");
+  assert(Subtarget->hasDQI() && "We can only lower v8i64 with AVX-512-DQI");
+
+  // FIXME: Implement direct support for this type!
+  return splitAndLowerVectorShuffle(DL, MVT::v8i64, V1, V2, Mask, DAG);
+}
+
+/// \brief Handle lowering of 16-lane 32-bit integer shuffles.
+static SDValue lowerV16I32VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                        const X86Subtarget *Subtarget,
+                                        SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  assert(V1.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
+  assert(V2.getSimpleValueType() == MVT::v16i32 && "Bad operand type!");
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Mask.size() == 16 && "Unexpected mask size for v16 shuffle!");
+  assert(Subtarget->hasDQI() && "We can only lower v16i32 with AVX-512-DQI!");
+
+  // FIXME: Implement direct support for this type!
+  return splitAndLowerVectorShuffle(DL, MVT::v16i32, V1, V2, Mask, DAG);
+}
+
+/// \brief Handle lowering of 32-lane 16-bit integer shuffles.
+static SDValue lowerV32I16VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                        const X86Subtarget *Subtarget,
+                                        SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  assert(V1.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
+  assert(V2.getSimpleValueType() == MVT::v32i16 && "Bad operand type!");
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Mask.size() == 32 && "Unexpected mask size for v32 shuffle!");
+  assert(Subtarget->hasBWI() && "We can only lower v32i16 with AVX-512-BWI!");
+
+  // FIXME: Implement direct support for this type!
+  return splitAndLowerVectorShuffle(DL, MVT::v32i16, V1, V2, Mask, DAG);
+}
+
+/// \brief Handle lowering of 64-lane 8-bit integer shuffles.
+static SDValue lowerV64I8VectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                       const X86Subtarget *Subtarget,
+                                       SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  assert(V1.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
+  assert(V2.getSimpleValueType() == MVT::v64i8 && "Bad operand type!");
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Mask.size() == 64 && "Unexpected mask size for v64 shuffle!");
+  assert(Subtarget->hasBWI() && "We can only lower v64i8 with AVX-512-BWI!");
+
+  // FIXME: Implement direct support for this type!
+  return splitAndLowerVectorShuffle(DL, MVT::v64i8, V1, V2, Mask, DAG);
+}
+
+/// \brief High-level routine to lower various 512-bit x86 vector shuffles.
+///
+/// This routine either breaks down the specific type of a 512-bit x86 vector
+/// shuffle or splits it into two 256-bit shuffles and fuses the results back
+/// together based on the available instructions.
+static SDValue lower512BitVectorShuffle(SDValue Op, SDValue V1, SDValue V2,
+                                        MVT VT, const X86Subtarget *Subtarget,
+                                        SelectionDAG &DAG) {
+  SDLoc DL(Op);
+  ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(Op);
+  ArrayRef<int> Mask = SVOp->getMask();
+  assert(Subtarget->hasAVX512() &&
+         "Cannot lower 512-bit vectors w/ basic ISA!");
+
+  // Dispatch to each element type for lowering. If we don't have support for
+  // specific element type shuffles at 512 bits, immediately split them and
+  // lower them. Each lowering routine of a given type is allowed to assume that
+  // the requisite ISA extensions for that element type are available.
+  switch (VT.SimpleTy) {
+  case MVT::v8f64:
+    return lowerV8F64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+  case MVT::v16f32:
+    return lowerV16F32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+  case MVT::v8i64:
+    if (Subtarget->hasDQI())
+      return lowerV8I64VectorShuffle(Op, V1, V2, Subtarget, DAG);
+    break;
+  case MVT::v16i32:
+    if (Subtarget->hasDQI())
+      return lowerV16I32VectorShuffle(Op, V1, V2, Subtarget, DAG);
+    break;
+  case MVT::v32i16:
+    if (Subtarget->hasBWI())
+      return lowerV32I16VectorShuffle(Op, V1, V2, Subtarget, DAG);
+    break;
+  case MVT::v64i8:
+    if (Subtarget->hasBWI())
+      return lowerV64I8VectorShuffle(Op, V1, V2, Subtarget, DAG);
+    break;
+
+  default:
+    llvm_unreachable("Not a valid 512-bit x86 vector type!");
+  }
+
+  // Otherwise fall back on splitting.
+  return splitAndLowerVectorShuffle(DL, VT, V1, V2, Mask, DAG);
+}
+
 /// \brief Helper function to test whether a shuffle mask could be
 /// simplified by widening the elements being shuffled.
 ///
@@ -10039,7 +10181,7 @@ static SDValue lowerVectorShuffle(SDValue Op, const X86Subtarget *Subtarget,
   // Force AVX-512 vectors to be scalarized for now.
   // FIXME: Implement AVX-512 support!
   if (VT.getSizeInBits() == 512)
-    return SDValue();
+    return lower512BitVectorShuffle(Op, V1, V2, VT, Subtarget, DAG);
 
   llvm_unreachable("Unimplemented!");
 }
File diff suppressed because it is too large