X86 ISel: Basic support for variable-index vector permutations
Summary:
Try to lower a BUILD_VECTOR composed of extract-extract chains that can
be reasoned to be a permutation of a vector by indices in a non-constant
vector. We saw this pattern created by ISPC, which resorts to creating it
because of the requirement that shufflevector's mask operand be a
*constant* vector.
I didn't check this, but we could possibly use this pattern for lowering
the X86 permute C intrinsics instead of llvm.x86 intrinsics.

This change can be followed by more improvements:
1. Handle vectors with undef elements.
2. Utilize pshufb and zero-mask-blending to support more efficient
   construction of vectors with constant-0 elements.
3. Use smaller-element vectors of the same width, and "interpolate" the
   indices, when no native operation is available.

Reviewers: RKSimon, craig.topper

Reviewed By: RKSimon

Subscribers: chandlerc, DavidKreitzer

Differential Revision: https://reviews.llvm.org/D39126

llvm-svn: 317463
parent 3844f1ad5c
commit 3122698040
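To make the matched pattern concrete, here is a minimal hand-written sketch (the function name is hypothetical and this is not one of the patch's own tests) of the IR shape described above, for <8 x i16>: every result lane extracts from %v at a lane read out of %indices at a constant position. shufflevector cannot express this because its mask must be constant; with this patch, an AVX512VL+BW target should select the whole chain to a single vpermw instead of spilling %v and doing scalar loads.

define <8 x i16> @var_perm_sketch(<8 x i16> %v, <8 x i16> %indices) {
  ; Runtime indices, read at constant positions 0..7.
  %i0 = extractelement <8 x i16> %indices, i32 0
  %i1 = extractelement <8 x i16> %indices, i32 1
  %i2 = extractelement <8 x i16> %indices, i32 2
  %i3 = extractelement <8 x i16> %indices, i32 3
  %i4 = extractelement <8 x i16> %indices, i32 4
  %i5 = extractelement <8 x i16> %indices, i32 5
  %i6 = extractelement <8 x i16> %indices, i32 6
  %i7 = extractelement <8 x i16> %indices, i32 7
  ; Variable-index extracts from the one source vector.
  %e0 = extractelement <8 x i16> %v, i16 %i0
  %e1 = extractelement <8 x i16> %v, i16 %i1
  %e2 = extractelement <8 x i16> %v, i16 %i2
  %e3 = extractelement <8 x i16> %v, i16 %i3
  %e4 = extractelement <8 x i16> %v, i16 %i4
  %e5 = extractelement <8 x i16> %v, i16 %i5
  %e6 = extractelement <8 x i16> %v, i16 %i6
  %e7 = extractelement <8 x i16> %v, i16 %i7
  ; Rebuilding the result turns into the BUILD_VECTOR node this patch matches.
  %r0 = insertelement <8 x i16> undef, i16 %e0, i32 0
  %r1 = insertelement <8 x i16> %r0, i16 %e1, i32 1
  %r2 = insertelement <8 x i16> %r1, i16 %e2, i32 2
  %r3 = insertelement <8 x i16> %r2, i16 %e3, i32 3
  %r4 = insertelement <8 x i16> %r3, i16 %e4, i32 4
  %r5 = insertelement <8 x i16> %r4, i16 %e5, i32 5
  %r6 = insertelement <8 x i16> %r5, i16 %e6, i32 6
  %r7 = insertelement <8 x i16> %r6, i16 %e7, i32 7
  ret <8 x i16> %r7
}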
@@ -7713,6 +7713,111 @@ static SDValue materializeVectorConstant(SDValue Op, SelectionDAG &DAG,
  return SDValue();
}

// Tries to lower a BUILD_VECTOR composed of extract-extract chains that can be
// reasoned to be a permutation of a vector by indices in a non-constant vector.
// (build_vector (extract_elt V, (extract_elt I, 0)),
//               (extract_elt V, (extract_elt I, 1)),
//               ...
// ->
// (vpermv I, V)
//
// TODO: Handle undefs
// TODO: Utilize pshufb and zero mask blending to support more efficient
//       construction of vectors with constant-0 elements.
// TODO: Use smaller-element vectors of the same width, and "interpolate" the
//       indices, when no native operation is available.
static SDValue
LowerBUILD_VECTORAsVariablePermute(SDValue V, SelectionDAG &DAG,
                                   const X86Subtarget &Subtarget) {
  // Look for VPERMV and PSHUFB opportunities.
  MVT VT = V.getSimpleValueType();
  switch (VT.SimpleTy) {
  default:
    return SDValue();
  case MVT::v16i8:
    if (!Subtarget.hasSSSE3())
      return SDValue();
    break;
  case MVT::v8f32:
  case MVT::v8i32:
    if (!Subtarget.hasAVX2())
      return SDValue();
    break;
  case MVT::v4i64:
  case MVT::v4f64:
    if (!Subtarget.hasVLX())
      return SDValue();
    break;
  case MVT::v16f32:
  case MVT::v8f64:
  case MVT::v16i32:
  case MVT::v8i64:
    if (!Subtarget.hasAVX512())
      return SDValue();
    break;
  case MVT::v32i16:
    if (!Subtarget.hasBWI())
      return SDValue();
    break;
  case MVT::v8i16:
  case MVT::v16i16:
    if (!Subtarget.hasVLX() || !Subtarget.hasBWI())
      return SDValue();
    break;
  case MVT::v64i8:
    if (!Subtarget.hasVBMI())
      return SDValue();
    break;
  case MVT::v32i8:
    if (!Subtarget.hasVLX() || !Subtarget.hasVBMI())
      return SDValue();
    break;
  }
  SDValue SrcVec, IndicesVec;
  // Check for a match of the permute source vector and permute index elements.
  // This is done by checking that the i-th build_vector operand is of the form:
  // (extract_elt SrcVec, (extract_elt IndicesVec, i)).
  for (unsigned Idx = 0, E = V.getNumOperands(); Idx != E; ++Idx) {
    SDValue Op = V.getOperand(Idx);
    if (Op.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // If this is the first extract encountered in V, set the source vector,
    // otherwise verify the extract is from the previously defined source
    // vector.
    if (!SrcVec)
      SrcVec = Op.getOperand(0);
    else if (SrcVec != Op.getOperand(0))
      return SDValue();
    SDValue ExtractedIndex = Op->getOperand(1);
    // Peek through extends.
    if (ExtractedIndex.getOpcode() == ISD::ZERO_EXTEND ||
        ExtractedIndex.getOpcode() == ISD::SIGN_EXTEND)
      ExtractedIndex = ExtractedIndex.getOperand(0);
    if (ExtractedIndex.getOpcode() != ISD::EXTRACT_VECTOR_ELT)
      return SDValue();

    // If this is the first extract from the index vector candidate, set the
    // indices vector, otherwise verify the extract is from the previously
    // defined indices vector.
    if (!IndicesVec)
      IndicesVec = ExtractedIndex.getOperand(0);
    else if (IndicesVec != ExtractedIndex.getOperand(0))
      return SDValue();

    auto *PermIdx = dyn_cast<ConstantSDNode>(ExtractedIndex.getOperand(1));
    if (!PermIdx || PermIdx->getZExtValue() != Idx)
      return SDValue();
  }
  MVT IndicesVT = VT;
  if (VT.isFloatingPoint())
    IndicesVT = MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits()),
                                 VT.getVectorNumElements());
  IndicesVec = DAG.getZExtOrTrunc(IndicesVec, SDLoc(IndicesVec), IndicesVT);
  return DAG.getNode(VT == MVT::v16i8 ? X86ISD::PSHUFB : X86ISD::VPERMV,
                     SDLoc(V), VT, IndicesVec, SrcVec);
}

SDValue
X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  SDLoc dl(Op);
@@ -7928,6 +8033,9 @@ X86TargetLowering::LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const {
  if (IsAllConstants)
    return SDValue();

  if (SDValue V = LowerBUILD_VECTORAsVariablePermute(Op, DAG, Subtarget))
    return V;

  // See if we can use a vector load to get all of the elements.
  if (VT.is128BitVector() || VT.is256BitVector() || VT.is512BitVector()) {
    SmallVector<SDValue, 64> Ops(Op->op_begin(), Op->op_begin() + NumElems);
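One detail worth calling out from the epilogue of the new function: X86ISD::VPERMV takes an integer index vector even when the value type is floating point, which is why IndicesVT is rebuilt as the same-width integer type and IndicesVec is run through getZExtOrTrunc. A hedged sketch of a case exercising that path (hypothetical function name, not from the patch's tests); on an AVX512VL target this should become a single vpermpd:

define <4 x double> @var_perm_f64_sketch(<4 x double> %v, <4 x i64> %indices) {
  ; The value type is FP (v4f64) but the index vector stays integer
  ; (v4i64), matching the IndicesVT logic above.
  %i0 = extractelement <4 x i64> %indices, i32 0
  %i1 = extractelement <4 x i64> %indices, i32 1
  %i2 = extractelement <4 x i64> %indices, i32 2
  %i3 = extractelement <4 x i64> %indices, i32 3
  %e0 = extractelement <4 x double> %v, i64 %i0
  %e1 = extractelement <4 x double> %v, i64 %i1
  %e2 = extractelement <4 x double> %v, i64 %i2
  %e3 = extractelement <4 x double> %v, i64 %i3
  %r0 = insertelement <4 x double> undef, double %e0, i32 0
  %r1 = insertelement <4 x double> %r0, double %e1, i32 1
  %r2 = insertelement <4 x double> %r1, double %e2, i32 2
  %r3 = insertelement <4 x double> %r2, double %e3, i32 3
  ret <4 x double> %r3
}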
@@ -143,35 +143,40 @@ define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v8i16:
; AVX: # BB#0:
; AVX-NEXT: vmovd %xmm1, %eax
; AVX-NEXT: vpextrw $1, %xmm1, %r10d
; AVX-NEXT: vpextrw $2, %xmm1, %ecx
; AVX-NEXT: vpextrw $3, %xmm1, %edx
; AVX-NEXT: vpextrw $4, %xmm1, %esi
; AVX-NEXT: vpextrw $5, %xmm1, %edi
; AVX-NEXT: vpextrw $6, %xmm1, %r8d
; AVX-NEXT: vpextrw $7, %xmm1, %r9d
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $7, %eax
; AVX-NEXT: andl $7, %r10d
; AVX-NEXT: andl $7, %ecx
; AVX-NEXT: andl $7, %edx
; AVX-NEXT: andl $7, %esi
; AVX-NEXT: andl $7, %edi
; AVX-NEXT: andl $7, %r8d
; AVX-NEXT: andl $7, %r9d
; AVX-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vpinsrw $1, -24(%rsp,%r10,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $2, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $3, -24(%rsp,%rdx,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $4, -24(%rsp,%rsi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $5, -24(%rsp,%rdi,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $6, -24(%rsp,%r8,2), %xmm0, %xmm0
; AVX-NEXT: vpinsrw $7, -24(%rsp,%r9,2), %xmm0, %xmm0
; AVX-NEXT: retq
; AVXNOVLBW-LABEL: var_shuffle_v8i16:
; AVXNOVLBW: # BB#0:
; AVXNOVLBW-NEXT: vmovd %xmm1, %eax
; AVXNOVLBW-NEXT: vpextrw $1, %xmm1, %r10d
; AVXNOVLBW-NEXT: vpextrw $2, %xmm1, %ecx
; AVXNOVLBW-NEXT: vpextrw $3, %xmm1, %edx
; AVXNOVLBW-NEXT: vpextrw $4, %xmm1, %esi
; AVXNOVLBW-NEXT: vpextrw $5, %xmm1, %edi
; AVXNOVLBW-NEXT: vpextrw $6, %xmm1, %r8d
; AVXNOVLBW-NEXT: vpextrw $7, %xmm1, %r9d
; AVXNOVLBW-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVXNOVLBW-NEXT: andl $7, %eax
; AVXNOVLBW-NEXT: andl $7, %r10d
; AVXNOVLBW-NEXT: andl $7, %ecx
; AVXNOVLBW-NEXT: andl $7, %edx
; AVXNOVLBW-NEXT: andl $7, %esi
; AVXNOVLBW-NEXT: andl $7, %edi
; AVXNOVLBW-NEXT: andl $7, %r8d
; AVXNOVLBW-NEXT: andl $7, %r9d
; AVXNOVLBW-NEXT: movzwl -24(%rsp,%rax,2), %eax
; AVXNOVLBW-NEXT: vmovd %eax, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $1, -24(%rsp,%r10,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $2, -24(%rsp,%rcx,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $3, -24(%rsp,%rdx,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $4, -24(%rsp,%rsi,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $5, -24(%rsp,%rdi,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $6, -24(%rsp,%r8,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: vpinsrw $7, -24(%rsp,%r9,2), %xmm0, %xmm0
; AVXNOVLBW-NEXT: retq
;
; AVX512VLBW-LABEL: var_shuffle_v8i16:
; AVX512VLBW: # BB#0:
; AVX512VLBW-NEXT: vpermw %xmm0, %xmm1, %xmm0
; AVX512VLBW-NEXT: retq
%index0 = extractelement <8 x i16> %indices, i32 0
%index1 = extractelement <8 x i16> %indices, i32 1
%index2 = extractelement <8 x i16> %indices, i32 2
@@ -202,143 +207,13 @@ define <8 x i16> @var_shuffle_v8i16(<8 x i16> %v, <8 x i16> %indices) nounwind {
define <16 x i8> @var_shuffle_v16i8(<16 x i8> %v, <16 x i8> %indices) nounwind {
; SSSE3-LABEL: var_shuffle_v16i8:
; SSSE3: # BB#0:
; SSSE3-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp)
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: leaq -{{[0-9]+}}(%rsp), %rax
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm8
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm15
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm9
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm3
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm10
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm7
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm11
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm6
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm12
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm5
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm13
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm4
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm14
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm1
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %ecx
; SSSE3-NEXT: movd %ecx, %xmm2
; SSSE3-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx
; SSSE3-NEXT: andl $15, %ecx
; SSSE3-NEXT: movzbl (%rcx,%rax), %eax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm8[0],xmm15[1],xmm8[1],xmm15[2],xmm8[2],xmm15[3],xmm8[3],xmm15[4],xmm8[4],xmm15[5],xmm8[5],xmm15[6],xmm8[6],xmm15[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm15[0],xmm3[1],xmm15[1],xmm3[2],xmm15[2],xmm3[3],xmm15[3]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm11[0],xmm6[1],xmm11[1],xmm6[2],xmm11[2],xmm6[3],xmm11[3],xmm6[4],xmm11[4],xmm6[5],xmm11[5],xmm6[6],xmm11[6],xmm6[7],xmm11[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm6 = xmm6[0],xmm3[0],xmm6[1],xmm3[1]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3],xmm5[4],xmm12[4],xmm5[5],xmm12[5],xmm5[6],xmm12[6],xmm5[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm13[0],xmm4[1],xmm13[1],xmm4[2],xmm13[2],xmm4[3],xmm13[3],xmm4[4],xmm13[4],xmm4[5],xmm13[5],xmm4[6],xmm13[6],xmm4[7],xmm13[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1]
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSSE3-NEXT: pshufb %xmm0, %xmm1
; SSSE3-NEXT: movdqa %xmm1, %xmm0
; SSSE3-NEXT: retq
;
; AVX-LABEL: var_shuffle_v16i8:
; AVX: # BB#0:
; AVX-NEXT: vpextrb $0, %xmm1, %eax
; AVX-NEXT: vmovaps %xmm0, -{{[0-9]+}}(%rsp)
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: leaq -{{[0-9]+}}(%rsp), %rcx
; AVX-NEXT: movzbl (%rax,%rcx), %eax
; AVX-NEXT: vmovd %eax, %xmm0
; AVX-NEXT: vpextrb $1, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $1, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $2, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $2, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $3, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $3, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $4, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $4, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $5, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $5, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $6, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $6, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $7, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $7, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $8, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $8, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $9, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $9, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $10, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $10, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $11, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $11, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $12, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $12, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $13, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $13, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $14, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $14, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpextrb $15, %xmm1, %eax
; AVX-NEXT: andl $15, %eax
; AVX-NEXT: vpinsrb $15, (%rax,%rcx), %xmm0, %xmm0
; AVX-NEXT: vpshufb %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
%index0 = extractelement <16 x i8> %indices, i32 0
%index1 = extractelement <16 x i8> %indices, i32 1
File diff suppressed because it is too large
@@ -6,47 +6,7 @@
define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8i64:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-64, %rsp
; AVX512-NEXT: subq $128, %rsp
; AVX512-NEXT: vmovq %xmm1, %r8
; AVX512-NEXT: andl $7, %r8d
; AVX512-NEXT: vpextrq $1, %xmm1, %r9
; AVX512-NEXT: andl $7, %r9d
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovq %xmm2, %r10
; AVX512-NEXT: andl $7, %r10d
; AVX512-NEXT: vpextrq $1, %xmm2, %rsi
; AVX512-NEXT: andl $7, %esi
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rdi
; AVX512-NEXT: andl $7, %edi
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: andl $7, %eax
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vmovq %xmm1, %rcx
; AVX512-NEXT: andl $7, %ecx
; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
; AVX512-NEXT: andl $7, %edx
; AVX512-NEXT: vmovaps %zmm0, (%rsp)
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovlhps {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512-NEXT: vmovlhps {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512-NEXT: vmovsd {{.*#+}} xmm3 = mem[0],zero
; AVX512-NEXT: vmovlhps {{.*#+}} xmm2 = xmm3[0],xmm2[0]
; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: movq %rbp, %rsp
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
%index1 = extractelement <8 x i64> %indices, i32 1
@@ -78,76 +38,7 @@ define <8 x i64> @var_shuffle_v8i64(<8 x i64> %v, <8 x i64> %indices) nounwind {
define <16 x i32> @var_shuffle_v16i32(<16 x i32> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16i32:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-64, %rsp
; AVX512-NEXT: subq $128, %rsp
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512-NEXT: vpextrq $1, %xmm4, %rax
; AVX512-NEXT: vmovq %xmm4, %rdx
; AVX512-NEXT: movq %rdx, %rcx
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: vmovaps %zmm0, (%rsp)
; AVX512-NEXT: andl $15, %edx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: movq %rsp, %rdx
; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm0, %xmm0
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm0, %xmm0
; AVX512-NEXT: vmovq %xmm3, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm0, %xmm0
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; AVX512-NEXT: vpextrq $1, %xmm3, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm4, %xmm3
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm3, %xmm3
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm3, %xmm3
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm4, %xmm2
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm2, %xmm2
; AVX512-NEXT: vmovq %xmm1, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm2, %xmm2
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vmovd {{.*#+}} xmm4 = mem[0],zero,zero,zero
; AVX512-NEXT: vpextrq $1, %xmm1, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $1, (%rcx,%rdx), %xmm4, %xmm1
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vpinsrd $2, (%rsp,%rax,4), %xmm1, %xmm1
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vpinsrd $3, (%rcx,%rdx), %xmm1, %xmm1
; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX512-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: movq %rbp, %rsp
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
%index1 = extractelement <16 x i32> %indices, i32 1
@@ -381,136 +272,7 @@ define <32 x i16> @var_shuffle_v32i16(<32 x i16> %v, <32 x i16> %indices) nounwind {
;
; AVX512BW-LABEL: var_shuffle_v32i16:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: pushq %rbp
; AVX512BW-NEXT: movq %rsp, %rbp
; AVX512BW-NEXT: andq $-64, %rsp
; AVX512BW-NEXT: subq $128, %rsp
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512BW-NEXT: vmovd %xmm4, %eax
; AVX512BW-NEXT: vmovaps %zmm0, (%rsp)
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm0
; AVX512BW-NEXT: vpextrw $1, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrw $2, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrw $3, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrw $4, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $4, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrw $5, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrw $6, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrw $7, %xmm4, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
; AVX512BW-NEXT: vmovd %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm4
; AVX512BW-NEXT: vpextrw $1, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $1, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $2, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $2, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $3, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $3, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $4, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $4, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $5, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $6, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $7, %xmm3, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm4, %xmm3
; AVX512BW-NEXT: vmovd %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm4
; AVX512BW-NEXT: vpextrw $1, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $2, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $3, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $4, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $5, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $5, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $6, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $6, %eax, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $7, %xmm2, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vpinsrw $7, %eax, %xmm4, %xmm2
; AVX512BW-NEXT: vmovd %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: movzwl (%rsp,%rax,2), %eax
; AVX512BW-NEXT: vmovd %eax, %xmm4
; AVX512BW-NEXT: vpextrw $1, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $1, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $2, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $2, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $3, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $3, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $4, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $4, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $5, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $5, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $6, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $6, (%rsp,%rax,2), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrw $7, %xmm1, %eax
; AVX512BW-NEXT: andl $31, %eax
; AVX512BW-NEXT: vpinsrw $7, (%rsp,%rax,2), %xmm4, %xmm1
; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512BW-NEXT: movq %rbp, %rsp
; AVX512BW-NEXT: popq %rbp
; AVX512BW-NEXT: vpermw %zmm0, %zmm1, %zmm0
; AVX512BW-NEXT: retq
%index0 = extractelement <32 x i16> %indices, i32 0
%index1 = extractelement <32 x i16> %indices, i32 1
@@ -1014,267 +776,10 @@ define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
; NOBW-NEXT: popq %rbp
; NOBW-NEXT: retq
;
; AVX512BW-LABEL: var_shuffle_v64i8:
; AVX512BW: # BB#0:
; AVX512BW-NEXT: pushq %rbp
; AVX512BW-NEXT: movq %rsp, %rbp
; AVX512BW-NEXT: andq $-64, %rsp
; AVX512BW-NEXT: subq $128, %rsp
; AVX512BW-NEXT: vpextrb $0, %xmm1, %ecx
; AVX512BW-NEXT: vpextrb $1, %xmm1, %eax
; AVX512BW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512BW-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512BW-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512BW-NEXT: vpextrb $0, %xmm4, %edx
; AVX512BW-NEXT: vmovaps %zmm0, (%rsp)
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movq %rsp, %rsi
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vmovd %edx, %xmm0
; AVX512BW-NEXT: vpextrb $1, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $2, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $2, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $3, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $4, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $4, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $5, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $5, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $6, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $6, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $7, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $7, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $8, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $8, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $9, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $9, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $10, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $10, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $11, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $11, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $12, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $12, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $13, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $13, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $14, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $15, %xmm4, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $15, %edx, %xmm0, %xmm0
; AVX512BW-NEXT: vpextrb $0, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vmovd %edx, %xmm4
; AVX512BW-NEXT: vpextrb $1, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $2, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $2, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $3, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $3, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $4, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $4, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $5, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $5, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $6, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $6, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $7, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $7, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $8, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $8, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $9, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $9, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $10, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $10, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $11, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $11, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $12, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $12, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $13, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $13, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $14, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $15, %xmm3, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $15, %edx, %xmm4, %xmm3
; AVX512BW-NEXT: vpextrb $0, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vmovd %edx, %xmm4
; AVX512BW-NEXT: vpextrb $1, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $1, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $2, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $2, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $3, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $3, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $4, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $4, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $5, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $5, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $6, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $6, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $7, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $7, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $8, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $8, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $9, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $9, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $10, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $10, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $11, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $11, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $12, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $12, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $13, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $13, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $14, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $15, %xmm2, %edx
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: vpinsrb $15, %edx, %xmm4, %xmm2
; AVX512BW-NEXT: vpextrb $2, %xmm1, %edx
; AVX512BW-NEXT: andl $63, %ecx
; AVX512BW-NEXT: movzbl (%rcx,%rsi), %ecx
; AVX512BW-NEXT: vmovd %ecx, %xmm4
; AVX512BW-NEXT: vpextrb $3, %xmm1, %ecx
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $1, (%rax,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $4, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: vpinsrb $2, (%rdx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $5, %xmm1, %edx
; AVX512BW-NEXT: andl $63, %ecx
; AVX512BW-NEXT: vpinsrb $3, (%rcx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $6, %xmm1, %ecx
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $4, (%rax,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $7, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: vpinsrb $5, (%rdx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $8, %xmm1, %edx
; AVX512BW-NEXT: andl $63, %ecx
; AVX512BW-NEXT: vpinsrb $6, (%rcx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $9, %xmm1, %ecx
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $7, (%rax,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $10, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: vpinsrb $8, (%rdx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $11, %xmm1, %edx
; AVX512BW-NEXT: andl $63, %ecx
; AVX512BW-NEXT: vpinsrb $9, (%rcx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $12, %xmm1, %ecx
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: vpinsrb $10, (%rax,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $13, %xmm1, %eax
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: vpinsrb $11, (%rdx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $14, %xmm1, %edx
; AVX512BW-NEXT: andl $63, %ecx
; AVX512BW-NEXT: vpinsrb $12, (%rcx,%rsi), %xmm4, %xmm4
; AVX512BW-NEXT: vpextrb $15, %xmm1, %ecx
; AVX512BW-NEXT: andl $63, %eax
; AVX512BW-NEXT: andl $63, %edx
; AVX512BW-NEXT: andl $63, %ecx
; AVX512BW-NEXT: movzbl (%rcx,%rsi), %ecx
; AVX512BW-NEXT: movzbl (%rdx,%rsi), %edx
; AVX512BW-NEXT: movzbl (%rax,%rsi), %eax
; AVX512BW-NEXT: vpinsrb $13, %eax, %xmm4, %xmm1
; AVX512BW-NEXT: vpinsrb $14, %edx, %xmm1, %xmm1
; AVX512BW-NEXT: vpinsrb $15, %ecx, %xmm1, %xmm1
; AVX512BW-NEXT: vinserti128 $1, %xmm0, %ymm3, %ymm0
; AVX512BW-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm1
; AVX512BW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512BW-NEXT: movq %rbp, %rsp
; AVX512BW-NEXT: popq %rbp
; AVX512BW-NEXT: retq
; VBMI-LABEL: var_shuffle_v64i8:
; VBMI: # BB#0:
; VBMI-NEXT: vpermb %zmm0, %zmm1, %zmm0
; VBMI-NEXT: retq
%index0 = extractelement <64 x i8> %indices, i32 0
%index1 = extractelement <64 x i8> %indices, i32 1
%index2 = extractelement <64 x i8> %indices, i32 2
@@ -1473,43 +978,7 @@ define <64 x i8> @var_shuffle_v64i8(<64 x i8> %v, <64 x i8> %indices) nounwind {
define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v8f64:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-64, %rsp
; AVX512-NEXT: subq $128, %rsp
; AVX512-NEXT: vmovq %xmm1, %r8
; AVX512-NEXT: andl $7, %r8d
; AVX512-NEXT: vpextrq $1, %xmm1, %r9
; AVX512-NEXT: andl $7, %r9d
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vmovq %xmm2, %r10
; AVX512-NEXT: andl $7, %r10d
; AVX512-NEXT: vpextrq $1, %xmm2, %rsi
; AVX512-NEXT: andl $7, %esi
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm2
; AVX512-NEXT: vmovq %xmm2, %rdi
; AVX512-NEXT: andl $7, %edi
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: andl $7, %eax
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm1
; AVX512-NEXT: vmovq %xmm1, %rcx
; AVX512-NEXT: andl $7, %ecx
; AVX512-NEXT: vpextrq $1, %xmm1, %rdx
; AVX512-NEXT: andl $7, %edx
; AVX512-NEXT: vmovaps %zmm0, (%rsp)
; AVX512-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT: vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX512-NEXT: vmovsd {{.*#+}} xmm1 = mem[0],zero
; AVX512-NEXT: vmovhpd {{.*#+}} xmm1 = xmm1[0],mem[0]
; AVX512-NEXT: vmovsd {{.*#+}} xmm2 = mem[0],zero
; AVX512-NEXT: vmovhpd {{.*#+}} xmm2 = xmm2[0],mem[0]
; AVX512-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1
; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: movq %rbp, %rsp
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: vpermpd %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <8 x i64> %indices, i32 0
%index1 = extractelement <8 x i64> %indices, i32 1
@@ -1541,76 +1010,7 @@ define <8 x double> @var_shuffle_v8f64(<8 x double> %v, <8 x i64> %indices) nounwind {
define <16 x float> @var_shuffle_v16f32(<16 x float> %v, <16 x i32> %indices) nounwind {
; AVX512-LABEL: var_shuffle_v16f32:
; AVX512: # BB#0:
; AVX512-NEXT: pushq %rbp
; AVX512-NEXT: movq %rsp, %rbp
; AVX512-NEXT: andq $-64, %rsp
; AVX512-NEXT: subq $128, %rsp
; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX512-NEXT: vextracti32x4 $2, %zmm1, %xmm3
; AVX512-NEXT: vextracti32x4 $3, %zmm1, %xmm4
; AVX512-NEXT: vpextrq $1, %xmm4, %rax
; AVX512-NEXT: vmovq %xmm4, %rdx
; AVX512-NEXT: movq %rdx, %rcx
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: vmovaps %zmm0, (%rsp)
; AVX512-NEXT: andl $15, %edx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT: movq %rsp, %rdx
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
; AVX512-NEXT: vmovq %xmm3, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; AVX512-NEXT: vpextrq $1, %xmm3, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm3 = xmm4[0],mem[0],xmm4[2,3]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1],mem[0],xmm3[3]
; AVX512-NEXT: vmovq %xmm2, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm3 = xmm3[0,1,2],mem[0]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; AVX512-NEXT: vpextrq $1, %xmm2, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm4[0],mem[0],xmm4[2,3]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1],mem[0],xmm2[3]
; AVX512-NEXT: vmovq %xmm1, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],mem[0]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vmovss {{.*#+}} xmm4 = mem[0],zero,zero,zero
; AVX512-NEXT: vpextrq $1, %xmm1, %rax
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm4[0],mem[0],xmm4[2,3]
; AVX512-NEXT: movq %rax, %rcx
; AVX512-NEXT: andl $15, %eax
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1],mem[0],xmm1[3]
; AVX512-NEXT: shrq $30, %rcx
; AVX512-NEXT: andl $60, %ecx
; AVX512-NEXT: vinsertps {{.*#+}} xmm1 = xmm1[0,1,2],mem[0]
; AVX512-NEXT: vinsertf128 $1, %xmm0, %ymm3, %ymm0
; AVX512-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX512-NEXT: vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT: movq %rbp, %rsp
; AVX512-NEXT: popq %rbp
; AVX512-NEXT: vpermps %zmm0, %zmm1, %zmm0
; AVX512-NEXT: retq
%index0 = extractelement <16 x i32> %indices, i32 0
%index1 = extractelement <16 x i32> %indices, i32 1