[X86] LowerRotate - use vXi8 custom lowering for non-uniform constant amounts
Instead of bailing and using the default expansion, we can more efficiently use the shl(unpack(x,x),unpack(amt,zero)) pattern for vXi8 rotl, as we'll then use vXi16 fast PMULLW (or PSLLVW). This required some minor changes to improve constant folding during unpack shuffle creation and convertShiftLeftToScale to support constants that have already been lowered to constant pools.
commit 52cb0bbec3 (parent 6f2e087631)
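
The core trick, in scalar form (my sketch, not part of the patch): unpack(x,x) widens each byte lane to 16 bits by pairing the byte with itself, unpack(amt,zero) zero-extends the per-lane rotate amount, and a single vXi16 left shift (PSLLVW, or PMULLW once the constant amounts are turned into scales) leaves rotl(x,amt) in the high byte of every 16-bit lane:

    // Sketch only: models one byte lane of the vXi8 rotl lowering above.
    // Names are mine, not LLVM's.
    #include <cassert>
    #include <cstdint>

    static uint8_t Rotl8Widened(uint8_t X, unsigned Amt) {
      Amt &= 7;                                // rotate amounts are taken mod 8
      uint16_t Wide = uint16_t((X << 8) | X);  // unpack(x,x): duplicate the byte
      uint16_t Shl = uint16_t(Wide << Amt);    // one 16-bit shift per lane
      return uint8_t(Shl >> 8);                // rotl(x,amt) is the high byte
    }

    static uint8_t Rotl8Reference(uint8_t X, unsigned Amt) {
      Amt &= 7;
      return Amt ? uint8_t((X << Amt) | (X >> (8 - Amt))) : X;
    }

    int main() {
      for (unsigned Amt = 0; Amt != 8; ++Amt)
        for (unsigned X = 0; X != 256; ++X)
          assert(Rotl8Widened(uint8_t(X), Amt) == Rotl8Reference(uint8_t(X), Amt));
      return 0;
    }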
@@ -6782,12 +6782,33 @@ void llvm::createSplat2ShuffleMask(MVT VT, SmallVectorImpl<int> &Mask,
   }
 }
 
+// Attempt to constant fold, else just create a VECTOR_SHUFFLE.
+static SDValue getVectorShuffle(SelectionDAG &DAG, EVT VT, const SDLoc &dl,
+                                SDValue V1, SDValue V2, ArrayRef<int> Mask) {
+  if ((ISD::isBuildVectorOfConstantSDNodes(V1.getNode()) || V1.isUndef()) &&
+      (ISD::isBuildVectorOfConstantSDNodes(V2.getNode()) || V2.isUndef())) {
+    SmallVector<SDValue> Ops(Mask.size(), DAG.getUNDEF(VT.getScalarType()));
+    for (int I = 0, NumElts = Mask.size(); I != NumElts; ++I) {
+      int M = Mask[I];
+      if (M < 0)
+        continue;
+      SDValue V = (M < NumElts) ? V1 : V2;
+      if (V.isUndef())
+        continue;
+      Ops[I] = V.getOperand(M % NumElts);
+    }
+    return DAG.getBuildVector(VT, dl, Ops);
+  }
+
+  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
+}
+
 /// Returns a vector_shuffle node for an unpackl operation.
 static SDValue getUnpackl(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                           SDValue V1, SDValue V2) {
   SmallVector<int, 8> Mask;
   createUnpackShuffleMask(VT, Mask, /* Lo = */ true, /* Unary = */ false);
-  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
+  return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
 }
 
 /// Returns a vector_shuffle node for an unpackh operation.
@@ -6795,7 +6816,7 @@ static SDValue getUnpackh(SelectionDAG &DAG, const SDLoc &dl, EVT VT,
                           SDValue V1, SDValue V2) {
   SmallVector<int, 8> Mask;
   createUnpackShuffleMask(VT, Mask, /* Lo = */ false, /* Unary = */ false);
-  return DAG.getVectorShuffle(VT, dl, V1, V2, Mask);
+  return getVectorShuffle(DAG, VT, dl, V1, V2, Mask);
 }
 
 /// Returns a node that packs the LHS + RHS nodes together at half width.
@@ -29164,28 +29185,20 @@ static SDValue convertShiftLeftToScale(SDValue Amt, const SDLoc &dl,
         (Subtarget.hasBWI() && VT == MVT::v64i8)))
     return SDValue();
 
-  if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode())) {
-    SmallVector<SDValue, 8> Elts;
-    MVT SVT = VT.getVectorElementType();
-    unsigned SVTBits = SVT.getSizeInBits();
-    APInt One(SVTBits, 1);
-    unsigned NumElems = VT.getVectorNumElements();
-
-    for (unsigned i = 0; i != NumElems; ++i) {
-      SDValue Op = Amt->getOperand(i);
-      if (Op->isUndef()) {
-        Elts.push_back(Op);
+  MVT SVT = VT.getVectorElementType();
+  unsigned SVTBits = SVT.getSizeInBits();
+  unsigned NumElems = VT.getVectorNumElements();
+
+  APInt UndefElts;
+  SmallVector<APInt> EltBits;
+  if (getTargetConstantBitsFromNode(Amt, SVTBits, UndefElts, EltBits)) {
+    APInt One(SVTBits, 1);
+    SmallVector<SDValue> Elts(NumElems, DAG.getUNDEF(SVT));
+    for (unsigned I = 0; I != NumElems; ++I) {
+      if (UndefElts[I] || EltBits[I].uge(SVTBits))
         continue;
-      }
-
-      ConstantSDNode *ND = cast<ConstantSDNode>(Op);
-      APInt C(SVTBits, ND->getZExtValue());
-      uint64_t ShAmt = C.getZExtValue();
-      if (ShAmt >= SVTBits) {
-        Elts.push_back(DAG.getUNDEF(SVT));
-        continue;
-      }
-      Elts.push_back(DAG.getConstant(One.shl(ShAmt), dl, SVT));
+      uint64_t ShAmt = EltBits[I].getZExtValue();
+      Elts[I] = DAG.getConstant(One.shl(ShAmt), dl, SVT);
     }
     return DAG.getBuildVector(VT, dl, Elts);
   }
@@ -29861,9 +29874,7 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
   // the amount bit.
   // TODO: We're doing nothing here that we couldn't do for funnel shifts.
   if (EltSizeInBits == 8) {
-    if (ISD::isBuildVectorOfConstantSDNodes(Amt.getNode()))
-      return SDValue();
-
+    bool IsConstAmt = ISD::isBuildVectorOfConstantSDNodes(Amt.getNode());
     // Check for a hidden ISD::ROTR, vXi8 lowering can handle both, but we
     // currently hit infinite loops in legalization if we allow ISD::ROTR.
     // FIXME: Infinite ROTL<->ROTR legalization in TargetLowering::expandROT.
@@ -29894,6 +29905,9 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
       return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !HiddenROTRAmt);
     } else if (supportedVectorVarShift(WideVT, Subtarget, ShiftOpc) &&
                supportedVectorShiftWithImm(WideVT, Subtarget, ShiftOpc)) {
+      // If we're rotating by constant, just use default promotion.
+      if (IsConstAmt)
+        return SDValue();
       // See if we can perform this by widening to vXi16 or vXi32.
       R = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, R);
       R = DAG.getNode(
@@ -29904,9 +29918,10 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
       if (!HiddenROTRAmt)
         R = getTargetVShiftByConstNode(X86ISD::VSRLI, DL, WideVT, R, 8, DAG);
       return DAG.getNode(ISD::TRUNCATE, DL, VT, R);
-    } else if (supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
+    } else if (IsConstAmt ||
+               supportedVectorVarShift(ExtVT, Subtarget, ShiftOpc)) {
       // See if we can perform this by unpacking to lo/hi vXi16.
-      SDValue Z = getZeroVector(VT, Subtarget, DAG, DL);
+      SDValue Z = DAG.getConstant(0, DL, VT);
       SDValue RLo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
       SDValue RHi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
       SDValue ALo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, AmtMod, Z));
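
The convertShiftLeftToScale change above feeds the same pattern: a left shift by a per-lane constant is a multiply by 1 << amt (the One.shl(ShAmt) elements built above), so the widened vXi16 shifts can be issued as a single PMULLW. A minimal scalar model of that equivalence (my sketch, not LLVM code):

    // Sketch only: shl-by-constant expressed as a multiply, one 16-bit lane.
    #include <cassert>
    #include <cstdint>

    static uint16_t ShlAsScale(uint16_t X, unsigned Amt) {
      uint16_t Scale = uint16_t(1u << Amt); // the per-lane One.shl(ShAmt) constant
      return uint16_t(X * Scale);           // PMULLW applies this to every lane
    }

    int main() {
      for (unsigned Amt = 0; Amt != 16; ++Amt)
        assert(ShlAsScale(0x1234, Amt) == uint16_t(0x1234u << Amt));
      return 0;
    }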
@@ -1716,45 +1716,16 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind "min-legal-vector-width"="256" {
 }
 
 define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind "min-legal-vector-width"="256" {
-; CHECK-AVX512-LABEL: constant_rotate_v32i8:
-; CHECK-AVX512: # %bb.0:
-; CHECK-AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; CHECK-AVX512-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; CHECK-AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
-; CHECK-AVX512-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; CHECK-AVX512-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; CHECK-AVX512-NEXT: vpsrlw $8, %ymm1, %ymm1
-; CHECK-AVX512-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
-; CHECK-AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; CHECK-AVX512-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; CHECK-AVX512-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; CHECK-AVX512-NEXT: vpand %ymm3, %ymm2, %ymm2
-; CHECK-AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; CHECK-AVX512-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; CHECK-AVX512-NEXT: vpand %ymm3, %ymm0, %ymm0
-; CHECK-AVX512-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; CHECK-AVX512-NEXT: vpor %ymm1, %ymm0, %ymm0
-; CHECK-AVX512-NEXT: retq
-;
-; CHECK-VBMI-LABEL: constant_rotate_v32i8:
-; CHECK-VBMI: # %bb.0:
-; CHECK-VBMI-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; CHECK-VBMI-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
-; CHECK-VBMI-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
-; CHECK-VBMI-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; CHECK-VBMI-NEXT: vmovdqa {{.*#+}} ymm3 = [0,2,4,6,8,10,12,14,32,34,36,38,40,42,44,46,16,18,20,22,24,26,28,30,48,50,52,54,56,58,60,62]
-; CHECK-VBMI-NEXT: vpermi2b %ymm1, %ymm2, %ymm3
-; CHECK-VBMI-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; CHECK-VBMI-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; CHECK-VBMI-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; CHECK-VBMI-NEXT: vpsrlw $8, %ymm2, %ymm2
-; CHECK-VBMI-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
-; CHECK-VBMI-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; CHECK-VBMI-NEXT: vpsrlw $8, %ymm0, %ymm0
-; CHECK-VBMI-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; CHECK-VBMI-NEXT: vpor %ymm0, %ymm3, %ymm0
-; CHECK-VBMI-NEXT: retq
+; CHECK-LABEL: constant_rotate_v32i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; CHECK-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
+; CHECK-NEXT: vpsrlw $8, %ymm1, %ymm1
+; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
+; CHECK-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; CHECK-NEXT: vpsrlw $8, %ymm0, %ymm0
+; CHECK-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
+; CHECK-NEXT: retq
   %shl = shl <32 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
   %lshr = lshr <32 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
   %or = or <32 x i8> %shl, %lshr
@@ -2290,78 +2290,58 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
 ; CHECK-SSE2-LABEL: pr51133:
 ; CHECK-SSE2: # %bb.0:
 ; CHECK-SSE2-NEXT: movq %rdi, %rax
-; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm4
-; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm4
+; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm5
+; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
+; CHECK-SSE2-NEXT: pand %xmm4, %xmm5
 ; CHECK-SSE2-NEXT: movdqa %xmm1, %xmm6
 ; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm6
-; CHECK-SSE2-NEXT: packuswb %xmm4, %xmm6
+; CHECK-SSE2-NEXT: pand %xmm4, %xmm6
+; CHECK-SSE2-NEXT: packuswb %xmm5, %xmm6
 ; CHECK-SSE2-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
-; CHECK-SSE2-NEXT: pxor %xmm8, %xmm8
-; CHECK-SSE2-NEXT: movdqa %xmm6, %xmm7
-; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7
-; CHECK-SSE2-NEXT: psrlw $8, %xmm7
-; CHECK-SSE2-NEXT: movdqa %xmm6, %xmm4
-; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; CHECK-SSE2-NEXT: psrlw $8, %xmm4
-; CHECK-SSE2-NEXT: packuswb %xmm7, %xmm4
-; CHECK-SSE2-NEXT: movdqa %xmm6, %xmm7
-; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm7
+; CHECK-SSE2-NEXT: movdqa %xmm6, %xmm5
+; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm6[8],xmm5[9],xmm6[9],xmm5[10],xmm6[10],xmm5[11],xmm6[11],xmm5[12],xmm6[12],xmm5[13],xmm6[13],xmm5[14],xmm6[14],xmm5[15],xmm6[15]
+; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
+; CHECK-SSE2-NEXT: psrlw $8, %xmm5
 ; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm6
-; CHECK-SSE2-NEXT: packuswb %xmm7, %xmm6
-; CHECK-SSE2-NEXT: por %xmm4, %xmm6
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm4 = [84,2,36,42,2,0,2,4,2,255,4,36,126,30,2,2]
-; CHECK-SSE2-NEXT: pminub %xmm6, %xmm4
-; CHECK-SSE2-NEXT: pcmpeqb %xmm6, %xmm4
-; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
-; CHECK-SSE2-NEXT: pandn %xmm6, %xmm4
+; CHECK-SSE2-NEXT: psrlw $8, %xmm6
+; CHECK-SSE2-NEXT: packuswb %xmm5, %xmm6
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm7 = [84,2,36,42,2,0,2,4,2,255,4,36,126,30,2,2]
+; CHECK-SSE2-NEXT: pminub %xmm6, %xmm7
+; CHECK-SSE2-NEXT: pcmpeqb %xmm6, %xmm7
+; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
+; CHECK-SSE2-NEXT: pandn %xmm5, %xmm7
 ; CHECK-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pcmpgtb %xmm8, %xmm1
-; CHECK-SSE2-NEXT: pandn %xmm1, %xmm6
-; CHECK-SSE2-NEXT: por %xmm4, %xmm6
+; CHECK-SSE2-NEXT: pxor %xmm6, %xmm6
+; CHECK-SSE2-NEXT: pcmpgtb %xmm6, %xmm1
+; CHECK-SSE2-NEXT: pandn %xmm1, %xmm5
+; CHECK-SSE2-NEXT: por %xmm7, %xmm5
 ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm1
+; CHECK-SSE2-NEXT: pand %xmm4, %xmm1
 ; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm0
+; CHECK-SSE2-NEXT: pand %xmm4, %xmm0
 ; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm0
 ; CHECK-SSE2-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
-; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
+; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
 ; CHECK-SSE2-NEXT: psrlw $8, %xmm1
-; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm4
-; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm8[0],xmm4[1],xmm8[1],xmm4[2],xmm8[2],xmm4[3],xmm8[3],xmm4[4],xmm8[4],xmm4[5],xmm8[5],xmm4[6],xmm8[6],xmm4[7],xmm8[7]
-; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; CHECK-SSE2-NEXT: psrlw $8, %xmm4
-; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm4
-; CHECK-SSE2-NEXT: movdqa %xmm0, %xmm1
-; CHECK-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm1
 ; CHECK-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE2-NEXT: pand %xmm5, %xmm0
+; CHECK-SSE2-NEXT: psrlw $8, %xmm0
 ; CHECK-SSE2-NEXT: packuswb %xmm1, %xmm0
-; CHECK-SSE2-NEXT: por %xmm4, %xmm0
 ; CHECK-SSE2-NEXT: movdqa {{.*#+}} xmm1 = [19,51,13,7,127,31,127,3,5,5,51,37,3,127,85,5]
 ; CHECK-SSE2-NEXT: pmaxub %xmm0, %xmm1
 ; CHECK-SSE2-NEXT: pcmpeqb %xmm0, %xmm1
-; CHECK-SSE2-NEXT: pcmpeqb %xmm8, %xmm3
-; CHECK-SSE2-NEXT: pandn %xmm6, %xmm3
-; CHECK-SSE2-NEXT: pcmpeqb %xmm8, %xmm2
+; CHECK-SSE2-NEXT: pcmpeqb %xmm6, %xmm3
+; CHECK-SSE2-NEXT: pandn %xmm5, %xmm3
+; CHECK-SSE2-NEXT: pcmpeqb %xmm6, %xmm2
 ; CHECK-SSE2-NEXT: pandn %xmm1, %xmm2
 ; CHECK-SSE2-NEXT: pmovmskb %xmm2, %ecx
 ; CHECK-SSE2-NEXT: pmovmskb %xmm3, %edx
@@ -2374,73 +2354,57 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
 ; CHECK-SSE41: # %bb.0:
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm4
 ; CHECK-SSE41-NEXT: movq %rdi, %rax
-; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm5
-; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,255,255,255,255,255]
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm5
-; CHECK-SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm0
-; CHECK-SSE41-NEXT: packuswb %xmm5, %xmm0
-; CHECK-SSE41-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pxor %xmm8, %xmm8
-; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm7
-; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15]
-; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm7
-; CHECK-SSE41-NEXT: psrlw $8, %xmm7
-; CHECK-SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm6 = [256,256,256,128,64,2,256,32]
-; CHECK-SSE41-NEXT: pmullw %xmm5, %xmm6
-; CHECK-SSE41-NEXT: psrlw $8, %xmm6
-; CHECK-SSE41-NEXT: packuswb %xmm7, %xmm6
 ; CHECK-SSE41-NEXT: movdqa %xmm1, %xmm0
 ; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm0
-; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm5
-; CHECK-SSE41-NEXT: packuswb %xmm0, %xmm5
-; CHECK-SSE41-NEXT: por %xmm6, %xmm5
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255,255,255,255,255]
+; CHECK-SSE41-NEXT: pand %xmm5, %xmm0
+; CHECK-SSE41-NEXT: pmovzxbw {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
+; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; CHECK-SSE41-NEXT: pand %xmm5, %xmm6
+; CHECK-SSE41-NEXT: packuswb %xmm0, %xmm6
+; CHECK-SSE41-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; CHECK-SSE41-NEXT: movdqa %xmm6, %xmm0
+; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm6[8],xmm0[9],xmm6[9],xmm0[10],xmm6[10],xmm0[11],xmm6[11],xmm0[12],xmm6[12],xmm0[13],xmm6[13],xmm0[14],xmm6[14],xmm0[15],xmm6[15]
+; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; CHECK-SSE41-NEXT: psrlw $8, %xmm0
+; CHECK-SSE41-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm6
+; CHECK-SSE41-NEXT: psrlw $8, %xmm6
+; CHECK-SSE41-NEXT: packuswb %xmm0, %xmm6
 ; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm0 = [84,2,36,42,2,0,2,4,2,255,4,36,126,30,2,2]
-; CHECK-SSE41-NEXT: pminub %xmm5, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqb %xmm5, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqd %xmm5, %xmm5
-; CHECK-SSE41-NEXT: pxor %xmm0, %xmm5
+; CHECK-SSE41-NEXT: pminub %xmm6, %xmm0
+; CHECK-SSE41-NEXT: pcmpeqb %xmm6, %xmm0
+; CHECK-SSE41-NEXT: pcmpeqd %xmm7, %xmm7
+; CHECK-SSE41-NEXT: pxor %xmm0, %xmm7
 ; CHECK-SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; CHECK-SSE41-NEXT: pcmpgtb %xmm8, %xmm1
+; CHECK-SSE41-NEXT: pxor %xmm6, %xmm6
+; CHECK-SSE41-NEXT: pcmpgtb %xmm6, %xmm1
 ; CHECK-SSE41-NEXT: movaps {{.*#+}} xmm0 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
-; CHECK-SSE41-NEXT: pblendvb %xmm0, %xmm5, %xmm1
+; CHECK-SSE41-NEXT: pblendvb %xmm0, %xmm7, %xmm1
 ; CHECK-SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero,xmm4[4],zero,xmm4[5],zero,xmm4[6],zero,xmm4[7],zero
 ; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
 ; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm4
+; CHECK-SSE41-NEXT: pand %xmm5, %xmm4
 ; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm0
+; CHECK-SSE41-NEXT: pand %xmm5, %xmm0
 ; CHECK-SSE41-NEXT: packuswb %xmm4, %xmm0
 ; CHECK-SSE41-NEXT: paddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
 ; CHECK-SSE41-NEXT: movdqa %xmm0, %xmm4
-; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm8[8],xmm4[9],xmm8[9],xmm4[10],xmm8[10],xmm4[11],xmm8[11],xmm4[12],xmm8[12],xmm4[13],xmm8[13],xmm4[14],xmm8[14],xmm4[15],xmm8[15]
+; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
 ; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm4
 ; CHECK-SSE41-NEXT: psrlw $8, %xmm4
-; CHECK-SSE41-NEXT: pmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm6 = [256,256,256,128,128,32,128,32]
-; CHECK-SSE41-NEXT: pmullw %xmm5, %xmm6
-; CHECK-SSE41-NEXT: psrlw $8, %xmm6
-; CHECK-SSE41-NEXT: packuswb %xmm4, %xmm6
-; CHECK-SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; CHECK-SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm0
-; CHECK-SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm5
-; CHECK-SSE41-NEXT: pand %xmm9, %xmm5
-; CHECK-SSE41-NEXT: packuswb %xmm0, %xmm5
-; CHECK-SSE41-NEXT: por %xmm6, %xmm5
-; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm0 = [19,51,13,7,127,31,127,3,5,5,51,37,3,127,85,5]
-; CHECK-SSE41-NEXT: pmaxub %xmm5, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqb %xmm5, %xmm0
-; CHECK-SSE41-NEXT: pcmpeqb %xmm8, %xmm3
+; CHECK-SSE41-NEXT: psrlw $8, %xmm0
+; CHECK-SSE41-NEXT: packuswb %xmm4, %xmm0
+; CHECK-SSE41-NEXT: movdqa {{.*#+}} xmm4 = [19,51,13,7,127,31,127,3,5,5,51,37,3,127,85,5]
+; CHECK-SSE41-NEXT: pmaxub %xmm0, %xmm4
+; CHECK-SSE41-NEXT: pcmpeqb %xmm0, %xmm4
+; CHECK-SSE41-NEXT: pcmpeqb %xmm6, %xmm3
 ; CHECK-SSE41-NEXT: pandn %xmm1, %xmm3
-; CHECK-SSE41-NEXT: pcmpeqb %xmm8, %xmm2
-; CHECK-SSE41-NEXT: pandn %xmm0, %xmm2
+; CHECK-SSE41-NEXT: pcmpeqb %xmm6, %xmm2
+; CHECK-SSE41-NEXT: pandn %xmm4, %xmm2
 ; CHECK-SSE41-NEXT: pmovmskb %xmm2, %ecx
 ; CHECK-SSE41-NEXT: pmovmskb %xmm3, %edx
 ; CHECK-SSE41-NEXT: shll $16, %edx
@@ -2583,30 +2547,22 @@ define <32 x i1> @pr51133(<32 x i8> %x, <32 x i8> %y) {
 ; CHECK-AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
 ; CHECK-AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm4 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
 ; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm4, %ymm4
-; CHECK-AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm4
-; CHECK-AVX512VL-NEXT: vpackuswb %ymm2, %ymm4, %ymm2
+; CHECK-AVX512VL-NEXT: vpand %ymm3, %ymm4, %ymm3
+; CHECK-AVX512VL-NEXT: vpackuswb %ymm2, %ymm3, %ymm2
 ; CHECK-AVX512VL-NEXT: vpaddb {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; CHECK-AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; CHECK-AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15],ymm2[24],ymm4[24],ymm2[25],ymm4[25],ymm2[26],ymm4[26],ymm2[27],ymm4[27],ymm2[28],ymm4[28],ymm2[29],ymm4[29],ymm2[30],ymm4[30],ymm2[31],ymm4[31]
-; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm5, %ymm5
-; CHECK-AVX512VL-NEXT: vpsrlw $8, %ymm5, %ymm5
-; CHECK-AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm6 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[16],ymm4[16],ymm2[17],ymm4[17],ymm2[18],ymm4[18],ymm2[19],ymm4[19],ymm2[20],ymm4[20],ymm2[21],ymm4[21],ymm2[22],ymm4[22],ymm2[23],ymm4[23]
-; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
-; CHECK-AVX512VL-NEXT: vpsrlw $8, %ymm6, %ymm6
-; CHECK-AVX512VL-NEXT: vpackuswb %ymm5, %ymm6, %ymm5
-; CHECK-AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm6 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm6, %ymm6
-; CHECK-AVX512VL-NEXT: vpand %ymm3, %ymm6, %ymm6
+; CHECK-AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
+; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm3, %ymm3
+; CHECK-AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
 ; CHECK-AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
 ; CHECK-AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; CHECK-AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; CHECK-AVX512VL-NEXT: vpackuswb %ymm6, %ymm2, %ymm2
-; CHECK-AVX512VL-NEXT: vpor %ymm5, %ymm2, %ymm2
+; CHECK-AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
+; CHECK-AVX512VL-NEXT: vpackuswb %ymm3, %ymm2, %ymm2
 ; CHECK-AVX512VL-NEXT: vpminub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3
 ; CHECK-AVX512VL-NEXT: vpcmpeqb %ymm3, %ymm2, %ymm2
 ; CHECK-AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255]
 ; CHECK-AVX512VL-NEXT: vpandn %ymm3, %ymm2, %ymm2
 ; CHECK-AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
+; CHECK-AVX512VL-NEXT: vpxor %xmm4, %xmm4, %xmm4
 ; CHECK-AVX512VL-NEXT: vpcmpgtb %ymm4, %ymm0, %ymm0
 ; CHECK-AVX512VL-NEXT: vpandn %ymm0, %ymm3, %ymm3
 ; CHECK-AVX512VL-NEXT: vpcmpeqb %ymm4, %ymm1, %ymm0
@@ -1542,87 +1542,28 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x) nounwind {
 }
 
 define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
-; SSE2-LABEL: constant_funnnel_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pxor %xmm1, %xmm1
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: movdqa %xmm0, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: packuswb %xmm2, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm1
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm2, %xmm1
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT: pand %xmm2, %xmm0
-; SSE2-NEXT: packuswb %xmm1, %xmm0
-; SSE2-NEXT: por %xmm3, %xmm0
-; SSE2-NEXT: retq
+; SSE-LABEL: constant_funnnel_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
+; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: psrlw $8, %xmm0
+; SSE-NEXT: packuswb %xmm1, %xmm0
+; SSE-NEXT: retq
 ;
-; SSE41-LABEL: constant_funnnel_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm3, %xmm2
-; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
-; SSE41-NEXT: pmullw %xmm1, %xmm4
-; SSE41-NEXT: pand %xmm3, %xmm4
-; SSE41-NEXT: packuswb %xmm2, %xmm4
-; SSE41-NEXT: pxor %xmm2, %xmm2
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm0, %xmm1
-; SSE41-NEXT: por %xmm4, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
-;
-; AVX1-LABEL: constant_funnnel_v16i8:
-; AVX1: # %bb.0:
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
-; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
-; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
-; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
-; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm2
-; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
-; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
-; AVX1-NEXT: retq
-;
-; AVX2-LABEL: constant_funnnel_v16i8:
-; AVX2: # %bb.0:
-; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
-; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
-; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
-; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
-; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
-; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX2-NEXT: vzeroupper
-; AVX2-NEXT: retq
+; AVX-LABEL: constant_funnnel_v16i8:
+; AVX: # %bb.0:
+; AVX-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
+; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
+; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX-NEXT: retq
 ;
 ; AVX512F-LABEL: constant_funnnel_v16i8:
 ; AVX512F: # %bb.0:
@@ -1697,26 +1638,14 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
 ;
 ; X86-SSE2-LABEL: constant_funnnel_v16i8:
 ; X86-SSE2: # %bb.0:
-; X86-SSE2-NEXT: pxor %xmm2, %xmm2
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
-; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
-; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3
-; X86-SSE2-NEXT: psrlw $8, %xmm3
 ; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
-; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
+; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
 ; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
 ; X86-SSE2-NEXT: psrlw $8, %xmm1
-; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
-; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
-; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; X86-SSE2-NEXT: pand %xmm3, %xmm2
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
-; X86-SSE2-NEXT: pand %xmm3, %xmm0
-; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
-; X86-SSE2-NEXT: por %xmm1, %xmm0
+; X86-SSE2-NEXT: psrlw $8, %xmm0
+; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
 ; X86-SSE2-NEXT: retl
   %res = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
   ret <16 x i8> %res
@@ -1275,104 +1275,56 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x) nounwind {
 ; AVX1-LABEL: constant_funnnel_v32i8:
 ; AVX1: # %bb.0:
 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
-; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [256,128,64,32,16,8,4,2]
-; AVX1-NEXT: vpmullw %xmm3, %xmm9, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [256,2,4,8,16,32,64,128]
-; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm7
-; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm7
-; AVX1-NEXT: vpackuswb %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,128,64,32,16,8,4,2]
-; AVX1-NEXT: vpmullw %xmm7, %xmm1, %xmm1
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
-; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,128,64,32,16,8,4,2]
+; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
-; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm5
-; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm5
-; AVX1-NEXT: vpackuswb %xmm1, %xmm5, %xmm1
-; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
-; AVX1-NEXT: vpmullw %xmm3, %xmm9, %xmm3
-; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
-; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
-; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm6
-; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
-; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
-; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; AVX1-NEXT: vpmullw %xmm7, %xmm0, %xmm0
-; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
-; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
-; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
-; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
+; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
+; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
+; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
+; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
+; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT: retq
 ;
 ; AVX2-LABEL: constant_funnnel_v32i8:
 ; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
 ; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
-; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
 ; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
 ; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
 ; AVX512F-LABEL: constant_funnnel_v32i8:
 ; AVX512F: # %bb.0:
-; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512F-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
-; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
 ; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
 ; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512F-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512F-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT: retq
 ;
 ; AVX512VL-LABEL: constant_funnnel_v32i8:
 ; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
-; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
-; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
-; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
+; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
 ; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
 ; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
-; AVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
-; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
 ; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
 ; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
-; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
-; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
-; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
+; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
 ; AVX512VL-NEXT: retq
 ;
 ; AVX512BW-LABEL: constant_funnnel_v32i8:
@ -557,177 +557,93 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x) nounwind {
|
|||
; AVX512F-LABEL: constant_funnnel_v64i8:
|
||||
; AVX512F: # %bb.0:
|
||||
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
||||
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
|
||||
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
|
||||
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
|
||||
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
|
||||
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
|
||||
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
|
||||
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
|
||||
; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
|
||||
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
|
||||
; AVX512F-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
|
||||
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
|
||||
; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm4
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
|
||||
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
|
||||
; AVX512F-NEXT: vpmullw %ymm6, %ymm2, %ymm2
|
||||
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
|
||||
; AVX512F-NEXT: # ymm3 = mem[0,1,0,1]
|
||||
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
|
||||
; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
|
||||
; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
|
||||
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
|
||||
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
|
||||
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
||||
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
|
||||
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
|
||||
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
||||
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
|
||||
; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
|
||||
; AVX512F-NEXT: vpmullw %ymm6, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
||||
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
|
||||
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
|
||||
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
|
||||
; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
|
||||
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
|
||||
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
||||
; AVX512F-NEXT: vpmullw %ymm6, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: vpmullw %ymm4, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
|
||||
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
|
||||
; AVX512F-NEXT: vporq %zmm2, %zmm0, %zmm0
|
||||
; AVX512F-NEXT: retq
|
||||
;
|
||||
; AVX512VL-LABEL: constant_funnnel_v64i8:
|
||||
; AVX512VL: # %bb.0:
|
||||
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
|
||||
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
|
||||
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
|
||||
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
|
||||
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
|
||||
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
||||
; AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
|
||||
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
|
||||
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
|
||||
; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
|
||||
; AVX512VL-NEXT: vpmullw %ymm6, %ymm5, %ymm5
|
||||
; AVX512VL-NEXT: vpsrlw $8, %ymm5, %ymm5
|
||||
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
|
||||
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
|
||||
; AVX512VL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
|
||||
; AVX512VL-NEXT: vpsrlw $8, %ymm4, %ymm4
|
||||
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
|
||||
; AVX512VL-NEXT: vpmullw %ymm6, %ymm2, %ymm2
|
||||
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
|
||||
; AVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
|
||||
; AVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
||||
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
|
||||
; AVX512VL-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
|
||||
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
|
||||
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
|
||||
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
|
||||
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
||||
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
|
||||
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
|
||||
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
|
||||
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
|
||||
; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
|
||||
; AVX512VL-NEXT: vpmullw %ymm6, %ymm1, %ymm1
|
||||
; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
|
||||
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
|
||||
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
|
||||
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
|
||||
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
|
||||
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
|
||||
; AVX512VL-NEXT: vpmullw %ymm4, %ymm1, %ymm1
|
||||
; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
|
||||
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
|
||||
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
|
||||
; AVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
|
||||
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
|
||||
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_funnnel_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_funnnel_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: retq
;
; AVX512VBMI2-LABEL: constant_funnnel_v64i8:
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VBMI2-NEXT: retq
;
; AVX512VLVBMI2-LABEL: constant_funnnel_v64i8:
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: retq
%res = call <64 x i8> @llvm.fshl.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
ret <64 x i8> %res
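Both data operands of the @llvm.fshl call above are %x, so each byte is rotated left by its (non-uniform) constant amount; that is what exercises the vXi8 rotate lowering. A minimal standalone reproducer, assuming an x86-64 target (the function name is illustrative):

; fshl(%x, %x, %amt) with matching data operands is by definition rotl(%x, %amt).
declare <16 x i8> @llvm.fshl.v16i8(<16 x i8>, <16 x i8>, <16 x i8>)

define <16 x i8> @rotl_v16i8(<16 x i8> %x) {
  ; Rotate each byte left by a per-element constant amount.
  %r = call <16 x i8> @llvm.fshl.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>)
  ret <16 x i8> %r
}

Feeding this through llc with an SSE or AVX subtarget should produce a punpck/pmullw/packuswb sequence of the kind checked in this diff.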
@@ -1623,87 +1623,28 @@ define <8 x i16> @constant_funnnel_v8i16(<8 x i16> %x) nounwind {
}
define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
; SSE2-LABEL: constant_funnnel_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
; SSE-LABEL: constant_funnnel_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: retq
;
; SSE41-LABEL: constant_funnnel_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: packuswb %xmm2, %xmm4
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: por %xmm4, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_funnnel_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
; AVX-LABEL: constant_funnnel_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: constant_funnnel_v16i8:
; AVX512F: # %bb.0:
@@ -1778,26 +1719,14 @@ define <16 x i8> @constant_funnnel_v16i8(<16 x i8> %x) nounwind {
;
; X86-SSE2-LABEL: constant_funnnel_v16i8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psrlw $8, %xmm1
; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm3, %xmm2
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm3, %xmm0
; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
; X86-SSE2-NEXT: retl
%res = call <16 x i8> @llvm.fshr.v16i8(<16 x i8> %x, <16 x i8> %x, <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
ret <16 x i8> %res
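The multiply-based sequences above all rest on one identity: widen each byte to a 16-bit lane and multiply by 2^c, and the product holds both halves of the rotate at once, with the low byte equal to x << c (kept by the pand with 255) and the high byte equal to x >> (8 - c) (kept by the psrlw $8). Worked through for one byte with x = 0b10110001 and c = 3: x * 8 = 0b00000101_10001000, whose low byte 0b10001000 is x << 3 and whose high byte 0b00000101 is x >> 5; packuswb and por recombine them into 0b10001101 = rotl(x, 3).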
@@ -1351,104 +1351,56 @@ define <32 x i8> @constant_funnnel_v32i8(<32 x i8> %x) nounwind {
; AVX1-LABEL: constant_funnnel_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [256,2,4,8,16,32,64,128]
; AVX1-NEXT: vpmullw %xmm3, %xmm9, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [256,128,64,32,16,8,4,2]
; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm7
; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm7
; AVX1-NEXT: vpackuswb %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,2,4,8,16,32,64,128]
; AVX1-NEXT: vpmullw %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,2,4,8,16,32,64,128]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,128,64,32,16,8,4,2]
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
; AVX1-NEXT: vpmullw %xmm3, %xmm9, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm6
; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_funnnel_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_funnnel_v32i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512F-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_funnnel_v32i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_funnnel_v32i8:
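These CHECK lines are autogenerated, so after a lowering change they are refreshed by rerunning the update script over the affected tests rather than edited by hand. A sketch, assuming llc is on PATH and with an illustrative test path:

python llvm/utils/update_llc_test_checks.py llvm/test/CodeGen/X86/vector-fshl-rot-512.ll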
@@ -575,177 +575,93 @@ define <64 x i8> @constant_funnnel_v64i8(<64 x i8> %x) nounwind {
; AVX512F-LABEL: constant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX512F-NEXT: vpmullw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512F-NEXT: # ymm3 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512F-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm6, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_funnnel_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_funnnel_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: retq
;
; AVX512VBMI2-LABEL: constant_funnnel_v64i8:
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VBMI2-NEXT: retq
;
; AVX512VLVBMI2-LABEL: constant_funnnel_v64i8:
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: retq
%res = call <64 x i8> @llvm.fshr.v64i8(<64 x i8> %x, <64 x i8> %x, <64 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>)
ret <64 x i8> %res
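The constant_rotate tests below reach the same vXi8 lowering through explicit rotate amounts. An equivalent source-level form is the classic shift pair, which instcombine normally folds into a funnel-shift intrinsic first; a sketch with a uniform amount for brevity (the function name is illustrative):

define <16 x i8> @rotl_by_shifts_v16i8(<16 x i8> %x) {
  ; (x << 3) | (x >> 5) is rotl(x, 3) for 8-bit elements.
  %hi = shl <16 x i8> %x, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
  %lo = lshr <16 x i8> %x, <i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5, i8 5>
  %r = or <16 x i8> %hi, %lo
  ret <16 x i8> %r
}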
@@ -1479,87 +1479,28 @@ define <8 x i16> @constant_rotate_v8i16(<8 x i16> %a) nounwind {
}
define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
; SSE2-LABEL: constant_rotate_v16i8:
; SSE2: # %bb.0:
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE2-NEXT: psrlw $8, %xmm2
; SSE2-NEXT: movdqa %xmm0, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3
; SSE2-NEXT: psrlw $8, %xmm3
; SSE2-NEXT: packuswb %xmm2, %xmm3
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: por %xmm3, %xmm0
; SSE2-NEXT: retq
; SSE-LABEL: constant_rotate_v16i8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
||||
; SSE-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
|
||||
; SSE-NEXT: psrlw $8, %xmm0
|
||||
; SSE-NEXT: packuswb %xmm1, %xmm0
|
||||
; SSE-NEXT: retq
|
||||
;
; SSE41-LABEL: constant_rotate_v16i8:
; SSE41: # %bb.0:
; SSE41-NEXT: movdqa %xmm0, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; SSE41-NEXT: pand %xmm3, %xmm2
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
; SSE41-NEXT: pmullw %xmm1, %xmm4
; SSE41-NEXT: pand %xmm3, %xmm4
; SSE41-NEXT: packuswb %xmm2, %xmm4
; SSE41-NEXT: pxor %xmm2, %xmm2
; SSE41-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE41-NEXT: psrlw $8, %xmm0
; SSE41-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE41-NEXT: psrlw $8, %xmm1
; SSE41-NEXT: packuswb %xmm0, %xmm1
; SSE41-NEXT: por %xmm4, %xmm1
; SSE41-NEXT: movdqa %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: constant_rotate_v16i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15]
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpor %xmm0, %xmm1, %xmm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v16i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero,xmm0[8],zero,xmm0[9],zero,xmm0[10],zero,xmm0[11],zero,xmm0[12],zero,xmm0[13],zero,xmm0[14],zero,xmm0[15],zero
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
; AVX-LABEL: constant_rotate_v16i8:
; AVX: # %bb.0:
; AVX-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v16i8:
; AVX512F: # %bb.0:
@@ -1634,26 +1575,14 @@ define <16 x i8> @constant_rotate_v16i8(<16 x i8> %a) nounwind {
;
; X86-SSE2-LABEL: constant_rotate_v16i8:
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: pxor %xmm2, %xmm2
; X86-SSE2-NEXT: movdqa %xmm0, %xmm3
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: movdqa %xmm0, %xmm1
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: psrlw $8, %xmm1
; X86-SSE2-NEXT: packuswb %xmm3, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm3, %xmm2
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; X86-SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: pand %xmm3, %xmm0
; X86-SSE2-NEXT: packuswb %xmm2, %xmm0
; X86-SSE2-NEXT: por %xmm1, %xmm0
; X86-SSE2-NEXT: psrlw $8, %xmm0
; X86-SSE2-NEXT: packuswb %xmm1, %xmm0
; X86-SSE2-NEXT: retl
  %shl = shl <16 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
  %lshr = lshr <16 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
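The PMULLW constant pools visible in these checks encode shifts as multiplies: for the shl half the 16-bit multiplier is 1 << c, and for the lshr half it is 1 << (8 - c) followed by taking the high byte, which is why pools such as [1,2,4,8,16,32,64,128] and [256,128,64,32,16,8,4,2] appear above and below. A hedged scalar model of this (helper names are illustrative, not from the patch):

```cpp
#include <cassert>
#include <cstdint>

// Shift-as-multiply, the idea behind convertShiftLeftToScale: a 16-bit
// multiply by 1 << c is a left shift by c, and multiplying a zero-extended
// byte by 1 << (8 - c) then taking the high byte is a right shift by c.
static uint8_t shl8_via_mul(uint8_t x, unsigned c) {
  uint16_t lane = x;                                    // punpcklbw with zero
  lane = static_cast<uint16_t>(lane * (1u << c));       // pmullw by 2^c
  return static_cast<uint8_t>(lane);                    // keep low byte (pand 255)
}

static uint8_t lshr8_via_mul(uint8_t x, unsigned c) {
  uint16_t lane = x;                                    // punpcklbw with zero
  lane = static_cast<uint16_t>(lane * (1u << (8 - c))); // pmullw by 2^(8-c)
  return static_cast<uint8_t>(lane >> 8);               // psrlw $8: high byte
}

int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned c = 0; c < 8; ++c) {
      assert(shl8_via_mul(static_cast<uint8_t>(x), c) ==
             static_cast<uint8_t>(x << c));
      assert(lshr8_via_mul(static_cast<uint8_t>(x), c) ==
             static_cast<uint8_t>(x >> c));
    }
  return 0;
}
```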
@@ -1215,104 +1215,56 @@ define <32 x i8> @constant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1-LABEL: constant_rotate_v32i8:
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [256,128,64,32,16,8,4,2]
; AVX1-NEXT: vpmullw %xmm3, %xmm9, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [256,2,4,8,16,32,64,128]
; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm7
; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm7
; AVX1-NEXT: vpackuswb %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [1,128,64,32,16,8,4,2]
; AVX1-NEXT: vpmullw %xmm7, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1,128,64,32,16,8,4,2]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [1,2,4,8,16,32,64,128]
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpand %xmm2, %xmm5, %xmm5
; AVX1-NEXT: vpackuswb %xmm1, %xmm5, %xmm1
; AVX1-NEXT: vpor %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15]
; AVX1-NEXT: vpmullw %xmm3, %xmm9, %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm3
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpmullw %xmm6, %xmm5, %xmm6
; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm6
; AVX1-NEXT: vpackuswb %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm7, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpand %xmm2, %xmm4, %xmm2
; AVX1-NEXT: vpackuswb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vpor %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; AVX1-NEXT: vpmullw %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; AVX1-NEXT: vpmullw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: constant_rotate_v32i8:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX2-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: constant_rotate_v32i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512F-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v32i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8],ymm1[8],ymm0[9],ymm1[9],ymm0[10],ymm1[10],ymm0[11],ymm1[11],ymm0[12],ymm1[12],ymm0[13],ymm1[13],ymm0[14],ymm1[14],ymm0[15],ymm1[15],ymm0[24],ymm1[24],ymm0[25],ymm1[25],ymm0[26],ymm1[26],ymm0[27],ymm1[27],ymm0[28],ymm1[28],ymm0[29],ymm1[29],ymm0[30],ymm1[30],ymm0[31],ymm1[31]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm0[0],ymm1[0],ymm0[1],ymm1[1],ymm0[2],ymm1[2],ymm0[3],ymm1[3],ymm0[4],ymm1[4],ymm0[5],ymm1[5],ymm0[6],ymm1[6],ymm0[7],ymm1[7],ymm0[16],ymm1[16],ymm0[17],ymm1[17],ymm0[18],ymm1[18],ymm0[19],ymm1[19],ymm0[20],ymm1[20],ymm0[21],ymm1[21],ymm0[22],ymm1[22],ymm0[23],ymm1[23]
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm1 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vpmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v32i8:
@@ -570,177 +570,93 @@ define <64 x i8> @constant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: constant_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512F-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX512F-NEXT: vpmullw %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512F-NEXT: # ymm3 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512F-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm6, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512F-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512F-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512F-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512F-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512F-NEXT: vpmullw %ymm6, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512F-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: constant_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm1
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15],ymm1[24],ymm2[24],ymm1[25],ymm2[25],ymm1[26],ymm2[26],ymm1[27],ymm2[27],ymm1[28],ymm2[28],ymm1[29],ymm2[29],ymm1[30],ymm2[30],ymm1[31],ymm2[31]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [256,128,64,32,16,8,4,2,256,128,64,32,16,8,4,2]
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm5 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[16],ymm2[16],ymm1[17],ymm2[17],ymm1[18],ymm2[18],ymm1[19],ymm2[19],ymm1[20],ymm2[20],ymm1[21],ymm2[21],ymm1[22],ymm2[22],ymm1[23],ymm2[23]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [256,2,4,8,16,32,64,128,256,2,4,8,16,32,64,128]
; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm5, %ymm5
; AVX512VL-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm5, %ymm3
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm0[8],ymm2[8],ymm0[9],ymm2[9],ymm0[10],ymm2[10],ymm0[11],ymm2[11],ymm0[12],ymm2[12],ymm0[13],ymm2[13],ymm0[14],ymm2[14],ymm0[15],ymm2[15],ymm0[24],ymm2[24],ymm0[25],ymm2[25],ymm0[26],ymm2[26],ymm0[27],ymm2[27],ymm0[28],ymm2[28],ymm0[29],ymm2[29],ymm0[30],ymm2[30],ymm0[31],ymm2[31]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsrlw $8, %ymm4, %ymm4
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm0[0],ymm2[0],ymm0[1],ymm2[1],ymm0[2],ymm2[2],ymm0[3],ymm2[3],ymm0[4],ymm2[4],ymm0[5],ymm2[5],ymm0[6],ymm2[6],ymm0[7],ymm2[7],ymm0[16],ymm2[16],ymm0[17],ymm2[17],ymm0[18],ymm2[18],ymm0[19],ymm2[19],ymm0[20],ymm2[20],ymm0[21],ymm2[21],ymm0[22],ymm2[22],ymm0[23],ymm2[23]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512VL-NEXT: # ymm3 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpackuswb %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,128,64,32,16,8,4,2,1,128,64,32,16,8,4,2]
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm6 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512VL-NEXT: # ymm6 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm3, %ymm3
; AVX512VL-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512VL-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
; AVX512VL-NEXT: # ymm4 = mem[0,1,0,1]
; AVX512VL-NEXT: vpmullw %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpmullw %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VL-NEXT: vpmullw %ymm6, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpmullw %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpackuswb %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vinserti64x4 $1, %ymm1, %zmm0, %zmm0
; AVX512VL-NEXT: vporq %zmm2, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: constant_rotate_v64i8:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512BW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512BW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512BW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512BW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
;
; AVX512VLBW-LABEL: constant_rotate_v64i8:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLBW-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLBW-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VLBW-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLBW-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLBW-NEXT: retq
;
; AVX512VBMI2-LABEL: constant_rotate_v64i8:
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VBMI2-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VBMI2-NEXT: retq
;
; AVX512VLVBMI2-LABEL: constant_rotate_v64i8:
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8],zmm1[8],zmm0[9],zmm1[9],zmm0[10],zmm1[10],zmm0[11],zmm1[11],zmm0[12],zmm1[12],zmm0[13],zmm1[13],zmm0[14],zmm1[14],zmm0[15],zmm1[15],zmm0[24],zmm1[24],zmm0[25],zmm1[25],zmm0[26],zmm1[26],zmm0[27],zmm1[27],zmm0[28],zmm1[28],zmm0[29],zmm1[29],zmm0[30],zmm1[30],zmm0[31],zmm1[31],zmm0[40],zmm1[40],zmm0[41],zmm1[41],zmm0[42],zmm1[42],zmm0[43],zmm1[43],zmm0[44],zmm1[44],zmm0[45],zmm1[45],zmm0[46],zmm1[46],zmm0[47],zmm1[47],zmm0[56],zmm1[56],zmm0[57],zmm1[57],zmm0[58],zmm1[58],zmm0[59],zmm1[59],zmm0[60],zmm1[60],zmm0[61],zmm1[61],zmm0[62],zmm1[62],zmm0[63],zmm1[63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm1 = zmm0[0],zmm1[0],zmm0[1],zmm1[1],zmm0[2],zmm1[2],zmm0[3],zmm1[3],zmm0[4],zmm1[4],zmm0[5],zmm1[5],zmm0[6],zmm1[6],zmm0[7],zmm1[7],zmm0[16],zmm1[16],zmm0[17],zmm1[17],zmm0[18],zmm1[18],zmm0[19],zmm1[19],zmm0[20],zmm1[20],zmm0[21],zmm1[21],zmm0[22],zmm1[22],zmm0[23],zmm1[23],zmm0[32],zmm1[32],zmm0[33],zmm1[33],zmm0[34],zmm1[34],zmm0[35],zmm1[35],zmm0[36],zmm1[36],zmm0[37],zmm1[37],zmm0[38],zmm1[38],zmm0[39],zmm1[39],zmm0[48],zmm1[48],zmm0[49],zmm1[49],zmm0[50],zmm1[50],zmm0[51],zmm1[51],zmm0[52],zmm1[52],zmm0[53],zmm1[53],zmm0[54],zmm1[54],zmm0[55],zmm1[55]
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm1 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpackuswb %zmm2, %zmm1, %zmm1
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLVBMI2-NEXT: vpsllvw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpandq %zmm3, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpackuswb %zmm2, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vporq %zmm1, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: vpackuswb %zmm1, %zmm0, %zmm0
; AVX512VLVBMI2-NEXT: retq
  %shl = shl <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1>
  %lshr = lshr <64 x i8> %a, <i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7>
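The `or`/`ret` tail of this function lies beyond the hunk, but the pattern above is the usual rotate idiom: or'ing a left shift by c with a right shift by 8 - c. A quick scalar check of that identity in C++ (the helper names are invented for this sketch; after integer promotion `x >> 8` is well defined and yields 0, so the c == 0 lane, which pairs shl amount 0 with lshr amount 8 in the IR, degenerates to x | 0 == x):

```cpp
#include <cassert>
#include <cstdint>

// The shl/lshr/or rotate idiom the tests are built from.
static uint8_t rot_idiom(uint8_t x, unsigned c) {
  return static_cast<uint8_t>((x << c) | (x >> (8 - c))); // x promoted to int
}

// Reference rotate-left with masked amounts.
static uint8_t rotl8_ref(uint8_t x, unsigned c) {
  return static_cast<uint8_t>((x << (c & 7)) | (x >> ((8 - c) & 7)));
}

int main() {
  for (unsigned x = 0; x < 256; ++x)
    for (unsigned c = 0; c < 8; ++c)
      assert(rot_idiom(static_cast<uint8_t>(x), c) ==
             rotl8_ref(static_cast<uint8_t>(x), c));
  return 0;
}
```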