[X86][SSE] Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
For constant bit select patterns, replace one AND with an ANDNP, allowing us to reuse the constant mask. Only do this if the mask has multiple uses (to avoid losing load folding) or if we have XOP, as its VPCMOV can handle most folding commutations.

This also requires computeKnownBitsForTargetNode support for X86ISD::ANDNP and X86ISD::FOR to prevent regressions in fabs/fcopysign patterns.

Differential Revision: https://reviews.llvm.org/D55935

llvm-svn: 351819
parent 6202a3ad67
commit 933673d878
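Before the diff itself, a quick scalar model of why the canonicalized form is profitable (this sketch is not part of the patch; it just restates the identity in plain C++): both forms select bits of X where the mask C is set and bits of Y where it is clear, but the rewritten form only ever materializes the constant C, since the ANDNP node computes ~C & Y itself.

#include <cassert>
#include <cstdint>

// Both helpers compute the same bit-select; the second needs only C.
std::uint64_t bitSelectTwoMasks(std::uint64_t X, std::uint64_t Y,
                                std::uint64_t C) {
  return (X & C) | (Y & ~C); // materializes both C and ~C
}

std::uint64_t bitSelectAndnp(std::uint64_t X, std::uint64_t Y,
                             std::uint64_t C) {
  // ANDNP(C, Y) == ~C & Y, so the inversion folds into the opcode.
  return (X & C) | (~C & Y);
}

int main() {
  std::uint64_t X = 0x1234567890abcdefULL;
  std::uint64_t Y = 0xfedcba0987654321ULL;
  std::uint64_t C = 0x00ff00ff00ff00ffULL;
  assert(bitSelectTwoMasks(X, Y, C) == bitSelectAndnp(X, Y, C));
  return 0;
}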
@@ -30233,6 +30233,27 @@ void X86TargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
    Known = Known.trunc(BitWidth);
    break;
  }
  case X86ISD::ANDNP: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // ANDNP = (~X & Y);
    Known.One &= Known2.Zero;
    Known.Zero |= Known2.One;
    break;
  }
  case X86ISD::FOR: {
    KnownBits Known2;
    Known = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
    Known2 = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);

    // Output known-0 bits are only known if clear in both the LHS & RHS.
    Known.Zero &= Known2.Zero;
    // Output known-1 are known to be set if set in either the LHS | RHS.
    Known.One |= Known2.One;
    break;
  }
  case X86ISD::CMOV: {
    Known = DAG.computeKnownBits(Op.getOperand(1), Depth+1);
    // If we don't know any bits, early out.
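For reference, the known-bits rule the hunk above introduces can be modeled in a few lines (a self-contained toy, not LLVM's actual KnownBits class): for ANDNP(X, Y) = ~X & Y, a result bit is known one exactly where X is known zero and Y is known one, and known zero where X is known one or Y is known zero.

#include <cassert>
#include <cstdint>

// Toy known-bits model: Zero/One record the bits proven 0 or 1.
struct Bits { std::uint8_t Zero, One; };

// Rule added above for ANDNP(X, Y) = ~X & Y.
Bits andnpKnownBits(Bits X, Bits Y) {
  return {static_cast<std::uint8_t>(X.One | Y.Zero),
          static_cast<std::uint8_t>(X.Zero & Y.One)};
}

int main() {
  Bits X{0x33, 0xCC}; // X fully known: 0b11001100
  Bits Y{0x55, 0xAA}; // Y fully known: 0b10101010
  Bits R = andnpKnownBits(X, Y);
  assert(R.One == 0x22 && R.Zero == 0xDD); // ~X & Y == 0b00100010
  return 0;
}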
@@ -36519,6 +36540,52 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
  return SDValue();
}

// Canonicalize OR(AND(X,C),AND(Y,~C)) -> OR(AND(X,C),ANDNP(C,Y))
static SDValue canonicalizeBitSelect(SDNode *N, SelectionDAG &DAG,
                                     const X86Subtarget &Subtarget) {
  assert(N->getOpcode() == ISD::OR && "Unexpected Opcode");

  EVT VT = N->getValueType(0);
  if (!VT.isVector())
    return SDValue();

  SDValue N0 = peekThroughBitcasts(N->getOperand(0));
  SDValue N1 = peekThroughBitcasts(N->getOperand(1));
  if (N0.getOpcode() != ISD::AND || N1.getOpcode() != ISD::AND)
    return SDValue();

  // On XOP we'll lower to PCMOV so accept one use, otherwise only
  // do this if either mask has multiple uses already.
  if (!(Subtarget.hasXOP() || !N0.getOperand(1).hasOneUse() ||
        !N1.getOperand(1).hasOneUse()))
    return SDValue();

  // Attempt to extract constant byte masks.
  APInt UndefElts0, UndefElts1;
  SmallVector<APInt, 32> EltBits0, EltBits1;
  if (!getTargetConstantBitsFromNode(N0.getOperand(1), 8, UndefElts0, EltBits0,
                                     false, false))
    return SDValue();
  if (!getTargetConstantBitsFromNode(N1.getOperand(1), 8, UndefElts1, EltBits1,
                                     false, false))
    return SDValue();

  for (unsigned i = 0, e = EltBits0.size(); i != e; ++i) {
    // TODO - add UNDEF elts support.
    if (UndefElts0[i] || UndefElts1[i])
      return SDValue();
    if (EltBits0[i] != ~EltBits1[i])
      return SDValue();
  }

  SDLoc DL(N);
  SDValue X = N->getOperand(0);
  SDValue Y =
      DAG.getNode(X86ISD::ANDNP, DL, VT, DAG.getBitcast(VT, N0.getOperand(1)),
                  DAG.getBitcast(VT, N1.getOperand(0)));
  return DAG.getNode(ISD::OR, DL, VT, X, Y);
}

// Try to match OR(AND(~MASK,X),AND(MASK,Y)) logic pattern.
static bool matchLogicBlend(SDNode *N, SDValue &X, SDValue &Y, SDValue &Mask) {
  if (N->getOpcode() != ISD::OR)
@@ -36781,6 +36848,9 @@ static SDValue combineOr(SDNode *N, SelectionDAG &DAG,
  if (SDValue FPLogic = convertIntLogicToFPLogic(N, DAG, Subtarget))
    return FPLogic;

  if (SDValue R = canonicalizeBitSelect(N, DAG, Subtarget))
    return R;

  if (SDValue R = combineLogicBlendIntoPBLENDV(N, DAG, Subtarget))
    return R;
@@ -61,18 +61,17 @@ define <2 x i16> @test_bitreverse_v2i16(<2 x i16> %a) nounwind {
; X64-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,2,1,0,4,5,6,7]
; X64-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,6,5,4]
; X64-NEXT: packuswb %xmm2, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT: movdqa %xmm0, %xmm2
; X64-NEXT: pand %xmm1, %xmm2
; X64-NEXT: pshufhw {{.*#+}} xmm1 = xmm0[0,1,2,3,7,6,5,4]
; X64-NEXT: packuswb %xmm2, %xmm1
; X64-NEXT: movdqa {{.*#+}} xmm0 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; X64-NEXT: movdqa %xmm1, %xmm2
; X64-NEXT: pand %xmm0, %xmm2
; X64-NEXT: psllw $4, %xmm2
; X64-NEXT: movdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; X64-NEXT: pand %xmm3, %xmm2
; X64-NEXT: pand %xmm3, %xmm0
; X64-NEXT: psrlw $4, %xmm0
; X64-NEXT: pand %xmm1, %xmm0
; X64-NEXT: por %xmm2, %xmm0
; X64-NEXT: pand {{.*}}(%rip), %xmm1
; X64-NEXT: psrlw $4, %xmm1
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: pandn %xmm2, %xmm0
; X64-NEXT: por %xmm1, %xmm0
; X64-NEXT: movdqa {{.*#+}} xmm1 = [51,51,51,51,51,51,51,51,51,51,51,51,51,51,51,51]
; X64-NEXT: pand %xmm0, %xmm1
; X64-NEXT: psllw $2, %xmm1
@@ -19,9 +19,7 @@ define <2 x i64> @bitselect_v2i64_rr(<2 x i64>, <2 x i64>) {
;
; XOP-LABEL: bitselect_v2i64_rr:
; XOP: # %bb.0:
; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
; XOP-NEXT: vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_rr:
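The XOP diffs above collapse a vandps/vandps/vorps chain into a single vpcmov, a per-bit select. A hedged scalar model of that select (illustrative only; operand roles follow the (A & Sel) | (B & ~Sel) identity, not the instruction manual verbatim):

#include <cassert>
#include <cstdint>

// Per-bit select: take A where the selector bit is 1, B where it is 0.
std::uint64_t pcmov(std::uint64_t A, std::uint64_t B, std::uint64_t Sel) {
  return (A & Sel) | (B & ~Sel);
}

int main() {
  assert(pcmov(0xFFFF0000FFFF0000ULL, 0x0000FFFF0000FFFFULL,
               0xFF00FF00FF00FF00ULL) == 0xFF0000FFFF0000FFULL);
  return 0;
}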
@@ -47,10 +45,8 @@ define <2 x i64> @bitselect_v2i64_rm(<2 x i64>, <2 x i64>* nocapture readonly) {
;
; XOP-LABEL: bitselect_v2i64_rm:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps (%rdi), %xmm1
; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_rm:
@@ -78,10 +74,8 @@ define <2 x i64> @bitselect_v2i64_mr(<2 x i64>* nocapture readonly, <2 x i64>) {
;
; XOP-LABEL: bitselect_v2i64_mr:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps (%rdi), %xmm1
; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
; XOP-NEXT: vmovdqa (%rdi), %xmm1
; XOP-NEXT: vpcmov {{.*}}(%rip), %xmm0, %xmm1, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_mr:
@@ -110,11 +104,9 @@ define <2 x i64> @bitselect_v2i64_mm(<2 x i64>* nocapture readonly, <2 x i64>* n
;
; XOP-LABEL: bitselect_v2i64_mm:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps (%rdi), %xmm0
; XOP-NEXT: vmovaps (%rsi), %xmm1
; XOP-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; XOP-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; XOP-NEXT: vorps %xmm0, %xmm1, %xmm0
; XOP-NEXT: vmovdqa (%rsi), %xmm0
; XOP-NEXT: vmovdqa {{.*#+}} xmm1 = [18446744073709551612,18446744065119617022]
; XOP-NEXT: vpcmov %xmm1, (%rdi), %xmm0, %xmm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v2i64_mm:
@@ -150,9 +142,7 @@ define <4 x i64> @bitselect_v4i64_rr(<4 x i64>, <4 x i64>) {
;
; XOP-LABEL: bitselect_v4i64_rr:
; XOP: # %bb.0:
; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_rr:
@@ -170,23 +160,24 @@ define <4 x i64> @bitselect_v4i64_rr(<4 x i64>, <4 x i64>) {
define <4 x i64> @bitselect_v4i64_rm(<4 x i64>, <4 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v4i64_rm:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [8589934593,3]
; SSE-NEXT: andps %xmm2, %xmm1
; SSE-NEXT: andps %xmm2, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm2 = [18446744065119617022,18446744073709551612]
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: andps (%rdi), %xmm2
; SSE-NEXT: orps %xmm2, %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm4
; SSE-NEXT: andps %xmm2, %xmm4
; SSE-NEXT: movaps (%rdi), %xmm5
; SSE-NEXT: andps %xmm2, %xmm5
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andnps %xmm0, %xmm3
; SSE-NEXT: orps %xmm5, %xmm3
; SSE-NEXT: andnps %xmm1, %xmm2
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v4i64_rm:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps (%rdi), %ymm1
; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
; XOP-NEXT: vmovdqa (%rdi), %ymm1
; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_rm:
@@ -207,22 +198,23 @@ define <4 x i64> @bitselect_v4i64_mr(<4 x i64>* nocapture readonly, <4 x i64>) {
; SSE-LABEL: bitselect_v4i64_mr:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [12884901890,4294967296]
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: andps (%rdi), %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm4 = [18446744060824649725,18446744069414584319]
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: orps %xmm2, %xmm0
; SSE-NEXT: movaps 16(%rdi), %xmm4
; SSE-NEXT: andps %xmm2, %xmm4
; SSE-NEXT: movaps (%rdi), %xmm5
; SSE-NEXT: andps %xmm2, %xmm5
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: andnps %xmm0, %xmm3
; SSE-NEXT: orps %xmm5, %xmm3
; SSE-NEXT: andnps %xmm1, %xmm2
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v4i64_mr:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps (%rdi), %ymm1
; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
; XOP-NEXT: vmovdqa (%rdi), %ymm1
; XOP-NEXT: vpcmov {{.*}}(%rip), %ymm0, %ymm1, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_mr:
@@ -242,25 +234,23 @@ define <4 x i64> @bitselect_v4i64_mr(<4 x i64>* nocapture readonly, <4 x i64>) {
define <4 x i64> @bitselect_v4i64_mm(<4 x i64>* nocapture readonly, <4 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v4i64_mm:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm2 = [3,8589934593]
; SSE-NEXT: movaps 16(%rdi), %xmm3
; SSE-NEXT: andps %xmm2, %xmm3
; SSE-NEXT: andps (%rdi), %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551612,18446744065119617022]
; SSE-NEXT: movaps 16(%rsi), %xmm1
; SSE-NEXT: andps %xmm0, %xmm1
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: andps (%rsi), %xmm0
; SSE-NEXT: orps %xmm2, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm1 = [18446744073709551612,18446744065119617022]
; SSE-NEXT: movaps 16(%rsi), %xmm2
; SSE-NEXT: andps %xmm1, %xmm2
; SSE-NEXT: movaps (%rsi), %xmm3
; SSE-NEXT: andps %xmm1, %xmm3
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: andnps (%rdi), %xmm0
; SSE-NEXT: orps %xmm3, %xmm0
; SSE-NEXT: andnps 16(%rdi), %xmm1
; SSE-NEXT: orps %xmm2, %xmm1
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v4i64_mm:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps (%rdi), %ymm0
; XOP-NEXT: vmovaps (%rsi), %ymm1
; XOP-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; XOP-NEXT: vandps {{.*}}(%rip), %ymm1, %ymm1
; XOP-NEXT: vorps %ymm0, %ymm1, %ymm0
; XOP-NEXT: vmovdqa (%rsi), %ymm0
; XOP-NEXT: vmovdqa {{.*#+}} ymm1 = [18446744073709551612,18446744065119617022,18446744073709551612,18446744065119617022]
; XOP-NEXT: vpcmov %ymm1, (%rdi), %ymm0, %ymm0
; XOP-NEXT: retq
;
; AVX-LABEL: bitselect_v4i64_mm:
@@ -286,58 +276,55 @@ define <4 x i64> @bitselect_v4i64_mm(<4 x i64>* nocapture readonly, <4 x i64>* n
define <8 x i64> @bitselect_v8i64_rr(<8 x i64>, <8 x i64>) {
; SSE-LABEL: bitselect_v8i64_rr:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm8 = [12884901890,12884901890]
; SSE-NEXT: andps %xmm8, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm9 = [4294967296,12884901890]
; SSE-NEXT: andps %xmm9, %xmm2
; SSE-NEXT: andps %xmm8, %xmm1
; SSE-NEXT: andps %xmm9, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm8 = [18446744060824649725,18446744060824649725]
; SSE-NEXT: andps %xmm8, %xmm7
; SSE-NEXT: orps %xmm7, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm7 = [18446744069414584319,18446744060824649725]
; SSE-NEXT: andps %xmm7, %xmm6
; SSE-NEXT: orps %xmm6, %xmm2
; SSE-NEXT: andps %xmm5, %xmm8
; SSE-NEXT: orps %xmm8, %xmm1
; SSE-NEXT: andps %xmm4, %xmm7
; SSE-NEXT: orps %xmm7, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm9 = [18446744069414584319,18446744060824649725]
; SSE-NEXT: andps %xmm9, %xmm6
; SSE-NEXT: andps %xmm8, %xmm5
; SSE-NEXT: andps %xmm9, %xmm4
; SSE-NEXT: movaps %xmm9, %xmm10
; SSE-NEXT: andnps %xmm0, %xmm10
; SSE-NEXT: orps %xmm4, %xmm10
; SSE-NEXT: movaps %xmm8, %xmm4
; SSE-NEXT: andnps %xmm1, %xmm4
; SSE-NEXT: orps %xmm5, %xmm4
; SSE-NEXT: andnps %xmm2, %xmm9
; SSE-NEXT: orps %xmm6, %xmm9
; SSE-NEXT: andnps %xmm3, %xmm8
; SSE-NEXT: orps %xmm7, %xmm8
; SSE-NEXT: movaps %xmm10, %xmm0
; SSE-NEXT: movaps %xmm4, %xmm1
; SSE-NEXT: movaps %xmm9, %xmm2
; SSE-NEXT: movaps %xmm8, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_rr:
; XOP: # %bb.0:
; XOP-NEXT: vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
; XOP-NEXT: vandps %ymm4, %ymm1, %ymm1
; XOP-NEXT: vandps %ymm4, %ymm0, %ymm0
; XOP-NEXT: vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; XOP-NEXT: vandps %ymm4, %ymm3, %ymm3
; XOP-NEXT: vorps %ymm1, %ymm3, %ymm1
; XOP-NEXT: vandps %ymm4, %ymm2, %ymm2
; XOP-NEXT: vorps %ymm0, %ymm2, %ymm0
; XOP-NEXT: vmovdqa {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; XOP-NEXT: vpcmov %ymm4, %ymm0, %ymm2, %ymm0
; XOP-NEXT: vpcmov %ymm4, %ymm1, %ymm3, %ymm1
; XOP-NEXT: retq
;
; AVX1-LABEL: bitselect_v8i64_rr:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
; AVX1-NEXT: vandps %ymm4, %ymm1, %ymm1
; AVX1-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX1-NEXT: vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; AVX1-NEXT: vandps %ymm4, %ymm3, %ymm3
; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: vandps %ymm4, %ymm2, %ymm2
; AVX1-NEXT: vandnps %ymm0, %ymm4, %ymm0
; AVX1-NEXT: vorps %ymm0, %ymm2, %ymm0
; AVX1-NEXT: vandnps %ymm1, %ymm4, %ymm1
; AVX1-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX1-NEXT: retq
;
; AVX2-LABEL: bitselect_v8i64_rr:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovaps {{.*#+}} ymm4 = [4294967296,12884901890,12884901890,12884901890]
; AVX2-NEXT: vandps %ymm4, %ymm1, %ymm1
; AVX2-NEXT: vandps %ymm4, %ymm0, %ymm0
; AVX2-NEXT: vmovaps {{.*#+}} ymm4 = [18446744069414584319,18446744060824649725,18446744060824649725,18446744060824649725]
; AVX2-NEXT: vandps %ymm4, %ymm3, %ymm3
; AVX2-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vandps %ymm4, %ymm2, %ymm2
; AVX2-NEXT: vandnps %ymm0, %ymm4, %ymm0
; AVX2-NEXT: vorps %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vandnps %ymm1, %ymm4, %ymm1
; AVX2-NEXT: vorps %ymm1, %ymm3, %ymm1
; AVX2-NEXT: retq
;
; AVX512F-LABEL: bitselect_v8i64_rr:
@@ -355,23 +342,30 @@ define <8 x i64> @bitselect_v8i64_rr(<8 x i64>, <8 x i64>) {
define <8 x i64> @bitselect_v8i64_rm(<8 x i64>, <8 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v8i64_rm:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [8589934593,3]
; SSE-NEXT: andps %xmm4, %xmm3
; SSE-NEXT: andps %xmm4, %xmm2
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: movaps {{.*#+}} xmm4 = [18446744065119617022,18446744073709551612]
; SSE-NEXT: movaps 48(%rdi), %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: orps %xmm5, %xmm3
; SSE-NEXT: movaps 32(%rdi), %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: orps %xmm5, %xmm2
; SSE-NEXT: movaps 16(%rdi), %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: orps %xmm5, %xmm1
; SSE-NEXT: andps (%rdi), %xmm4
; SSE-NEXT: orps %xmm4, %xmm0
; SSE-NEXT: movaps 48(%rdi), %xmm8
; SSE-NEXT: andps %xmm4, %xmm8
; SSE-NEXT: movaps 32(%rdi), %xmm9
; SSE-NEXT: andps %xmm4, %xmm9
; SSE-NEXT: movaps 16(%rdi), %xmm7
; SSE-NEXT: andps %xmm4, %xmm7
; SSE-NEXT: movaps (%rdi), %xmm6
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: andnps %xmm0, %xmm5
; SSE-NEXT: orps %xmm6, %xmm5
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: andnps %xmm1, %xmm6
; SSE-NEXT: orps %xmm7, %xmm6
; SSE-NEXT: movaps %xmm4, %xmm7
; SSE-NEXT: andnps %xmm2, %xmm7
; SSE-NEXT: orps %xmm9, %xmm7
; SSE-NEXT: andnps %xmm3, %xmm4
; SSE-NEXT: orps %xmm8, %xmm4
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: movaps %xmm6, %xmm1
; SSE-NEXT: movaps %xmm7, %xmm2
; SSE-NEXT: movaps %xmm4, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_rm:
@@ -434,22 +428,29 @@ define <8 x i64> @bitselect_v8i64_mr(<8 x i64>* nocapture readonly, <8 x i64>) {
; SSE-LABEL: bitselect_v8i64_mr:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [12884901890,4294967296]
; SSE-NEXT: movaps 48(%rdi), %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps 32(%rdi), %xmm6
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: movaps 48(%rdi), %xmm8
; SSE-NEXT: andps %xmm4, %xmm8
; SSE-NEXT: movaps 32(%rdi), %xmm9
; SSE-NEXT: andps %xmm4, %xmm9
; SSE-NEXT: movaps 16(%rdi), %xmm7
; SSE-NEXT: andps %xmm4, %xmm7
; SSE-NEXT: andps (%rdi), %xmm4
; SSE-NEXT: movaps {{.*#+}} xmm8 = [18446744060824649725,18446744069414584319]
; SSE-NEXT: andps %xmm8, %xmm3
; SSE-NEXT: orps %xmm5, %xmm3
; SSE-NEXT: andps %xmm8, %xmm2
; SSE-NEXT: orps %xmm6, %xmm2
; SSE-NEXT: andps %xmm8, %xmm1
; SSE-NEXT: orps %xmm7, %xmm1
; SSE-NEXT: andps %xmm8, %xmm0
; SSE-NEXT: orps %xmm4, %xmm0
; SSE-NEXT: movaps (%rdi), %xmm6
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: movaps %xmm4, %xmm5
; SSE-NEXT: andnps %xmm0, %xmm5
; SSE-NEXT: orps %xmm6, %xmm5
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: andnps %xmm1, %xmm6
; SSE-NEXT: orps %xmm7, %xmm6
; SSE-NEXT: movaps %xmm4, %xmm7
; SSE-NEXT: andnps %xmm2, %xmm7
; SSE-NEXT: orps %xmm9, %xmm7
; SSE-NEXT: andnps %xmm3, %xmm4
; SSE-NEXT: orps %xmm8, %xmm4
; SSE-NEXT: movaps %xmm5, %xmm0
; SSE-NEXT: movaps %xmm6, %xmm1
; SSE-NEXT: movaps %xmm7, %xmm2
; SSE-NEXT: movaps %xmm4, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_mr:
@@ -511,26 +512,26 @@ define <8 x i64> @bitselect_v8i64_mr(<8 x i64>* nocapture readonly, <8 x i64>) {
define <8 x i64> @bitselect_v8i64_mm(<8 x i64>* nocapture readonly, <8 x i64>* nocapture readonly) {
; SSE-LABEL: bitselect_v8i64_mm:
; SSE: # %bb.0:
; SSE-NEXT: movaps {{.*#+}} xmm4 = [3,8589934593]
; SSE-NEXT: movaps 48(%rdi), %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: movaps 32(%rdi), %xmm5
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movaps 16(%rdi), %xmm6
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: andps (%rdi), %xmm4
; SSE-NEXT: movaps {{.*#+}} xmm0 = [18446744073709551612,18446744065119617022]
; SSE-NEXT: movaps 48(%rsi), %xmm3
; SSE-NEXT: andps %xmm0, %xmm3
; SSE-NEXT: orps %xmm1, %xmm3
; SSE-NEXT: movaps 32(%rsi), %xmm2
; SSE-NEXT: andps %xmm0, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm3 = [18446744073709551612,18446744065119617022]
; SSE-NEXT: movaps 48(%rsi), %xmm4
; SSE-NEXT: andps %xmm3, %xmm4
; SSE-NEXT: movaps 32(%rsi), %xmm5
; SSE-NEXT: andps %xmm3, %xmm5
; SSE-NEXT: movaps 16(%rsi), %xmm2
; SSE-NEXT: andps %xmm3, %xmm2
; SSE-NEXT: movaps (%rsi), %xmm1
; SSE-NEXT: andps %xmm3, %xmm1
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: andnps (%rdi), %xmm0
; SSE-NEXT: orps %xmm1, %xmm0
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: andnps 16(%rdi), %xmm1
; SSE-NEXT: orps %xmm2, %xmm1
; SSE-NEXT: movaps %xmm3, %xmm2
; SSE-NEXT: andnps 32(%rdi), %xmm2
; SSE-NEXT: orps %xmm5, %xmm2
; SSE-NEXT: movaps 16(%rsi), %xmm1
; SSE-NEXT: andps %xmm0, %xmm1
; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: andps (%rsi), %xmm0
; SSE-NEXT: orps %xmm4, %xmm0
; SSE-NEXT: andnps 48(%rdi), %xmm3
; SSE-NEXT: orps %xmm4, %xmm3
; SSE-NEXT: retq
;
; XOP-LABEL: bitselect_v8i64_mm:
@@ -194,37 +194,35 @@ define <4 x double> @combine_vec_fcopysign_fpext_sgn(<4 x double> %x, <4 x float
; SSE-LABEL: combine_vec_fcopysign_fpext_sgn:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm2, %xmm3
; SSE-NEXT: cvtss2sd %xmm2, %xmm4
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; SSE-NEXT: movaps %xmm2, %xmm6
; SSE-NEXT: unpckhpd {{.*#+}} xmm6 = xmm6[1],xmm2[1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm2[2,3]
; SSE-NEXT: movaps {{.*#+}} xmm7
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: andps %xmm7, %xmm2
; SSE-NEXT: movaps {{.*#+}} xmm8 = [-0.0E+0,-0.0E+0]
; SSE-NEXT: andps %xmm8, %xmm4
; SSE-NEXT: orps %xmm4, %xmm2
; SSE-NEXT: movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: xorps %xmm4, %xmm4
; SSE-NEXT: cvtss2sd %xmm5, %xmm4
; SSE-NEXT: andps %xmm8, %xmm4
; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT: movaps %xmm1, %xmm0
; SSE-NEXT: unpckhpd {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: andps %xmm7, %xmm0
; SSE-NEXT: movshdup {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE-NEXT: cvtss2sd %xmm2, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: movaps {{.*#+}} xmm6
; SSE-NEXT: cvtss2sd %xmm3, %xmm3
; SSE-NEXT: andps %xmm8, %xmm3
; SSE-NEXT: orps %xmm0, %xmm3
; SSE-NEXT: andps %xmm7, %xmm1
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: cvtss2sd %xmm6, %xmm0
; SSE-NEXT: andps %xmm8, %xmm0
; SSE-NEXT: orps %xmm0, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm3[0]
; SSE-NEXT: movaps %xmm2, %xmm0
; SSE-NEXT: movaps %xmm6, %xmm7
; SSE-NEXT: andnps %xmm3, %xmm7
; SSE-NEXT: movaps %xmm1, %xmm3
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm1[1]
; SSE-NEXT: andps %xmm6, %xmm3
; SSE-NEXT: orps %xmm3, %xmm7
; SSE-NEXT: andps %xmm6, %xmm1
; SSE-NEXT: cvtss2sd %xmm2, %xmm2
; SSE-NEXT: movaps %xmm6, %xmm3
; SSE-NEXT: andnps %xmm2, %xmm3
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: movlhps {{.*#+}} xmm1 = xmm1[0],xmm7[0]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: unpckhpd {{.*#+}} xmm2 = xmm2[1],xmm0[1]
; SSE-NEXT: andps %xmm6, %xmm2
; SSE-NEXT: xorps %xmm3, %xmm3
; SSE-NEXT: cvtss2sd %xmm4, %xmm3
; SSE-NEXT: andps %xmm6, %xmm0
; SSE-NEXT: andnps %xmm3, %xmm6
; SSE-NEXT: orps %xmm2, %xmm6
; SSE-NEXT: andps {{.*}}(%rip), %xmm5
; SSE-NEXT: orps %xmm5, %xmm0
; SSE-NEXT: movlhps {{.*#+}} xmm0 = xmm0[0],xmm6[0]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fpext_sgn:
@@ -246,35 +244,37 @@ define <4 x float> @combine_vec_fcopysign_fptrunc_sgn(<4 x float> %x, <4 x doubl
; SSE-LABEL: combine_vec_fcopysign_fptrunc_sgn:
; SSE: # %bb.0:
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: movaps {{.*#+}} xmm5
; SSE-NEXT: andps %xmm5, %xmm0
; SSE-NEXT: cvtsd2ss %xmm1, %xmm6
; SSE-NEXT: movaps {{.*#+}} xmm4 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: orps %xmm6, %xmm0
; SSE-NEXT: movshdup {{.*#+}} xmm6 = xmm3[1,1,3,3]
; SSE-NEXT: andps %xmm5, %xmm6
; SSE-NEXT: unpckhpd {{.*#+}} xmm3 = xmm3[1],xmm0[1]
; SSE-NEXT: movaps {{.*#+}} xmm4
; SSE-NEXT: andps %xmm4, %xmm3
; SSE-NEXT: cvtsd2ss %xmm2, %xmm5
; SSE-NEXT: movaps %xmm4, %xmm6
; SSE-NEXT: andnps %xmm5, %xmm6
; SSE-NEXT: orps %xmm3, %xmm6
; SSE-NEXT: movaps %xmm0, %xmm3
; SSE-NEXT: andps %xmm4, %xmm3
; SSE-NEXT: xorps %xmm5, %xmm5
; SSE-NEXT: cvtsd2ss %xmm1, %xmm5
; SSE-NEXT: movaps %xmm4, %xmm7
; SSE-NEXT: andnps %xmm5, %xmm7
; SSE-NEXT: orps %xmm7, %xmm3
; SSE-NEXT: movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT: andps %xmm4, %xmm5
; SSE-NEXT: movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT: cvtsd2ss %xmm1, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: orps %xmm6, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: movaps %xmm3, %xmm1
; SSE-NEXT: unpckhpd {{.*#+}} xmm1 = xmm1[1],xmm3[1]
; SSE-NEXT: andps %xmm5, %xmm1
; SSE-NEXT: xorps %xmm6, %xmm6
; SSE-NEXT: cvtsd2ss %xmm2, %xmm6
; SSE-NEXT: andps %xmm4, %xmm6
; SSE-NEXT: orps %xmm1, %xmm6
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],xmm6[0],xmm0[3]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1,2,3]
; SSE-NEXT: andps %xmm5, %xmm3
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: orps %xmm5, %xmm1
; SSE-NEXT: unpcklps {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1]
; SSE-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1],xmm6[0],xmm3[3]
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT: andps %xmm4, %xmm0
; SSE-NEXT: movhlps {{.*#+}} xmm2 = xmm2[1,1]
; SSE-NEXT: xorps %xmm1, %xmm1
; SSE-NEXT: cvtsd2ss %xmm2, %xmm1
; SSE-NEXT: andps %xmm4, %xmm1
; SSE-NEXT: orps %xmm3, %xmm1
; SSE-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
; SSE-NEXT: andnps %xmm1, %xmm4
; SSE-NEXT: orps %xmm0, %xmm4
; SSE-NEXT: insertps {{.*#+}} xmm3 = xmm3[0,1,2],xmm4[0]
; SSE-NEXT: movaps %xmm3, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_fcopysign_fptrunc_sgn:
@@ -1,5 +1,5 @@
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefix=SSE2 --check-prefix=CHECK
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=CHECK
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE2
; RUN: llc < %s -mtriple=x86_64-apple-macosx10.10.0 -mattr=+avx | FileCheck %s --check-prefixes=CHECK,AVX

; Assertions have been enhanced from utils/update_llc_test_checks.py to show the constant pool values.
; Use a macosx triple to make sure the format of those constant strings is exact.
@@ -35,12 +35,6 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
ret <4 x float> %tmp
}

; SSE2: [[SIGNMASK2:L.+]]:
; SSE2-NEXT: .long 2147483648
; SSE2-NEXT: .long 2147483648
; SSE2-NEXT: .long 2147483648
; SSE2-NEXT: .long 2147483648

; SSE2: [[MAGMASK2:L.+]]:
; SSE2-NEXT: .long 2147483647
; SSE2-NEXT: .long 2147483647
@@ -70,14 +64,14 @@ define <4 x float> @v4f32(<4 x float> %a, <4 x float> %b) nounwind {
define <8 x float> @v8f32(<8 x float> %a, <8 x float> %b) nounwind {
; SSE2-LABEL: v8f32:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps [[SIGNMASK2]](%rip), %xmm4
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm5
; SSE2-NEXT: andps %xmm5, %xmm0
; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm4, %xmm3
; SSE2-NEXT: andps %xmm5, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps [[MAGMASK2]](%rip), %xmm4
; SSE2-NEXT: movaps %xmm4, %xmm5
; SSE2-NEXT: andnps %xmm2, %xmm5
; SSE2-NEXT: andps %xmm4, %xmm0
; SSE2-NEXT: orps %xmm5, %xmm0
; SSE2-NEXT: andps %xmm4, %xmm1
; SSE2-NEXT: andnps %xmm3, %xmm4
; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: retq
;
; AVX-LABEL: v8f32:
@@ -118,10 +112,6 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
ret <2 x double> %tmp
}

; SSE2: [[SIGNMASK4:L.+]]:
; SSE2-NEXT: .quad -9223372036854775808
; SSE2-NEXT: .quad -9223372036854775808

; SSE2: [[MAGMASK4:L.+]]:
; SSE2-NEXT: .quad 9223372036854775807
; SSE2-NEXT: .quad 9223372036854775807
@@ -141,14 +131,14 @@ define <2 x double> @v2f64(<2 x double> %a, <2 x double> %b) nounwind {
define <4 x double> @v4f64(<4 x double> %a, <4 x double> %b) nounwind {
; SSE2-LABEL: v4f64:
; SSE2: # %bb.0:
; SSE2-NEXT: movaps [[SIGNMASK4]](%rip), %xmm4
; SSE2-NEXT: andps %xmm4, %xmm2
; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm5
; SSE2-NEXT: andps %xmm5, %xmm0
; SSE2-NEXT: orps %xmm2, %xmm0
; SSE2-NEXT: andps %xmm4, %xmm3
; SSE2-NEXT: andps %xmm5, %xmm1
; SSE2-NEXT: orps %xmm3, %xmm1
; SSE2-NEXT: movaps [[MAGMASK4]](%rip), %xmm4
; SSE2-NEXT: movaps %xmm4, %xmm5
; SSE2-NEXT: andnps %xmm2, %xmm5
; SSE2-NEXT: andps %xmm4, %xmm0
; SSE2-NEXT: orps %xmm5, %xmm0
; SSE2-NEXT: andps %xmm4, %xmm1
; SSE2-NEXT: andnps %xmm3, %xmm4
; SSE2-NEXT: orps %xmm4, %xmm1
; SSE2-NEXT: retq
;
; AVX-LABEL: v4f64:
File diff suppressed because it is too large
@@ -2556,10 +2556,8 @@ define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwi
; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vpsllw $4, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%res = call <32 x i8> @llvm.fshl.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
ret <32 x i8> %res
@@ -1504,32 +1504,30 @@ define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwi
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm3, %ymm2
; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm2
; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
@@ -341,47 +341,45 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7
; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
@@ -1475,16 +1473,15 @@ define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
@@ -111,49 +111,47 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: var_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm9
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm10, %ymm9, %ymm9
; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpandn %ymm4, %ymm7, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512F-NEXT: vpand %ymm7, %ymm8, %ymm8
; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm11
; AVX512F-NEXT: vpor %ymm4, %ymm11, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm9
; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm7, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm10, %ymm4, %ymm4
; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -163,49 +161,47 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512VL-LABEL: var_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm9
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm10 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm10, %ymm9, %ymm9
; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpandn %ymm4, %ymm7, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512VL-NEXT: vpand %ymm7, %ymm8, %ymm8
; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm11
; AVX512VL-NEXT: vpor %ymm4, %ymm11, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm9
; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpand %ymm6, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm7, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm10, %ymm4, %ymm4
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -776,32 +772,30 @@ define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x) nounwind {
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
@@ -2569,10 +2569,8 @@ define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y) nounwi
; XOPAVX2-LABEL: splatconstant_funnnel_v32i8:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpsrlw $4, %ymm1, %ymm1
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm1, %ymm1
; XOPAVX2-NEXT: vpsllw $4, %ymm0, %ymm0
; XOPAVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; XOPAVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: vpcmov {{.*}}(%rip), %ymm1, %ymm0, %ymm0
; XOPAVX2-NEXT: retq
%res = call <32 x i8> @llvm.fshr.v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> <i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4, i8 4>)
ret <32 x i8> %res
@@ -1496,32 +1496,30 @@ define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y) nounwi
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm3, %ymm2
; AVX512F-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm5, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm3, %ymm2
; AVX512VL-NEXT: vpand %ymm4, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm5, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
@@ -372,50 +372,48 @@ define <32 x i8> @var_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpxor %xmm10, %xmm10, %xmm10
; AVX1-NEXT: vpsubb %xmm5, %xmm10, %xmm5
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
; AVX1-NEXT: vpsubb %xmm5, %xmm8, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm11 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm6
; AVX1-NEXT: vmovdqa {{.*#+}} xmm7 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX1-NEXT: vpandn %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm6
; AVX1-NEXT: vpand %xmm7, %xmm6, %xmm6
; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm6
; AVX1-NEXT: vpor %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsubb %xmm1, %xmm10, %xmm1
; AVX1-NEXT: vpsubb %xmm1, %xmm8, %xmm1
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm11, %xmm3, %xmm3
; AVX1-NEXT: vpandn %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm7, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm6, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm9, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
@@ -1552,16 +1550,15 @@ define <32 x i8> @splatconstant_funnnel_v32i8(<32 x i8> %x) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
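The v64i8 hunks below run the same sequence twice on 256-bit halves, presumably because these test configurations lack AVX512BW byte ops at 512 bits, so every mask reload the combine removes is saved once per half. In the variable-amount hunks, the vpxor/vpsubb pair negates the rotate amount before the shift-and-select steps; a hedged scalar model of that trick (the direction mapping is illustrative):

    #include <cstdint>

    // Rotating left by the negated amount is the same as rotating right
    // by Amt, so one set of rotate-left steps serves both directions.
    uint8_t rotrViaRotl(uint8_t B, uint8_t Amt) {
      unsigned L = unsigned(-Amt) & 7;                 // vpsubb from zero, low 3 bits kept
      return uint8_t((B << L) | (B >> ((8 - L) & 7))); // the usual rotl steps
    }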
@@ -117,52 +117,50 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: var_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512F-NEXT: vpsubb %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm10
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm11, %ymm10, %ymm10
; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpandn %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm9
; AVX512F-NEXT: vpand %ymm8, %ymm9, %ymm9
; AVX512F-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm10, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm12
; AVX512F-NEXT: vpor %ymm4, %ymm12, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsubb %ymm3, %ymm6, %ymm3
; AVX512F-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512F-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm8, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm11, %ymm4, %ymm4
; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm10, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -172,52 +170,50 @@ define <64 x i8> @var_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512VL-LABEL: var_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vpxor %xmm6, %xmm6, %xmm6
; AVX512VL-NEXT: vpsubb %ymm2, %ymm6, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7]
; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm10
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm11 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm11, %ymm10, %ymm10
; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpandn %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm9
; AVX512VL-NEXT: vpand %ymm8, %ymm9, %ymm9
; AVX512VL-NEXT: vpor %ymm4, %ymm9, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm10 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm10, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm12
; AVX512VL-NEXT: vpor %ymm4, %ymm12, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsubb %ymm3, %ymm6, %ymm3
; AVX512VL-NEXT: vpand %ymm8, %ymm3, %ymm3
; AVX512VL-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm8, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm11, %ymm4, %ymm4
; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm10, %ymm2, %ymm2
; AVX512VL-NEXT: vpand %ymm9, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -796,32 +792,30 @@ define <64 x i8> @splatconstant_funnnel_v64i8(<64 x i8> %x) nounwind {
; AVX512F-LABEL: splatconstant_funnnel_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_funnnel_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
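The var_rotate hunks that follow decompose the per-byte rotate amount into three conditional steps, one per amount bit; a scalar model of the vpsrlw/vpsllw plus vpblendvb ladder (illustrative, not the exact DAG):

    #include <cstdint>

    // Rotate each byte left by Amt (0-7): conditionally rotate by 4,
    // then 2, then 1, exactly one test per amount bit.
    uint8_t varRotl(uint8_t B, uint8_t Amt) {
      if (Amt & 4) B = uint8_t((B << 4) | (B >> 4)); // vpsllw $4 / vpsrlw $4 + blend
      if (Amt & 2) B = uint8_t((B << 2) | (B >> 6)); // vpsllw $2 / vpsrlw $6 + blend
      if (Amt & 1) B = uint8_t((B << 1) | (B >> 7)); // vpaddb    / vpsrlw $7 + blend
      return B;
    }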
@@ -337,47 +337,45 @@ define <32 x i8> @var_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm2, %xmm5
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm9, %xmm5, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm5
; AVX1-NEXT: vpor %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsllw $5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $6, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm10 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm4
; AVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm2, %xmm7
; AVX1-NEXT: vpand %xmm6, %xmm7, %xmm7
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $7, %xmm2, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm2, %xmm2, %xmm7
; AVX1-NEXT: vpor %xmm3, %xmm7, %xmm3
; AVX1-NEXT: vpaddb %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpaddb %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm9, %xmm4, %xmm4
; AVX1-NEXT: vpandn %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm5
; AVX1-NEXT: vpand %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpsllw $5, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $6, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm10, %xmm3, %xmm3
; AVX1-NEXT: vpandn %xmm3, %xmm6, %xmm3
; AVX1-NEXT: vpsllw $2, %xmm0, %xmm4
; AVX1-NEXT: vpand %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpblendvb %xmm1, %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $7, %xmm0, %xmm3
; AVX1-NEXT: vpand %xmm5, %xmm3, %xmm3
; AVX1-NEXT: vpand %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpaddb %xmm0, %xmm0, %xmm4
; AVX1-NEXT: vpor %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpaddb %xmm1, %xmm1, %xmm1
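For the blend steering in the hunk above, vpsllw $5 parks amount bit 2 in each byte's sign bit and each vpaddb doubling exposes the next lower bit; a scalar sketch (hypothetical helper name):

    #include <cstdint>

    // True when step k (k = 0,1,2 selecting the rotates by 4, 2, 1)
    // should take the rotated value for this byte.
    bool takeRotated(uint8_t Amt, int Step) {
      uint8_t Sel = uint8_t(Amt << 5); // vpsllw $5: amount bit 2 -> sign bit
      Sel = uint8_t(Sel << Step);      // one vpaddb (doubling) per later step
      return (Sel & 0x80) != 0;        // vpblendvb keys off the sign bit
    }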
@@ -1476,16 +1474,15 @@ define <32 x i8> @splatconstant_rotate_v32i8(<32 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
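Why one 0xF0 constant now covers the whole rotate by 4, written out per byte (a worked identity under the same illustrative naming):

    #include <cstdint>

    uint8_t rotl4OneMask(uint8_t B) {
      uint8_t C = 0xF0;
      uint8_t Lo = uint8_t(~C & uint8_t(B >> 4)); // vpandn: (B >> 4) & 0x0F
      uint8_t Hi = uint8_t(C & uint8_t(B << 4));  // vpand with the same C
      return Hi | Lo;                             // vpor
    }

The scalar & 0x0F is redundant for a lone byte but models the vector code, where vpsrlw shifts whole 16-bit lanes and bits shifted in from the neighboring byte must be masked off.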
@@ -1760,16 +1757,15 @@ define <32 x i8> @splatconstant_rotate_mask_v32i8(<32 x i8> %a) nounwind {
; AVX1: # %bb.0:
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm1, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpor %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $4, %xmm0, %xmm2
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpandn %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpsllw $4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX1-NEXT: vpor %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
@@ -109,46 +109,44 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: var_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512F-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512F-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpand %ymm9, %ymm8, %ymm8
; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512F-NEXT: vpandn %ymm4, %ymm6, %ymm4
; AVX512F-NEXT: vpsllw $2, %ymm0, %ymm7
; AVX512F-NEXT: vpand %ymm6, %ymm7, %ymm7
; AVX512F-NEXT: vpor %ymm4, %ymm7, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512F-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpaddb %ymm0, %ymm0, %ymm8
; AVX512F-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512F-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512F-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512F-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm6, %ymm2
; AVX512F-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512F-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512F-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512F-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512F-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512F-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512F-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512F-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -158,46 +156,44 @@ define <64 x i8> @var_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512VL-LABEL: var_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm5 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm4, %ymm5, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm6
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm7, %ymm6, %ymm6
; AVX512VL-NEXT: vpand %ymm5, %ymm6, %ymm6
; AVX512VL-NEXT: vpor %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $6, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3]
; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm8
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm9 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpand %ymm9, %ymm8, %ymm8
; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252,252]
; AVX512VL-NEXT: vpandn %ymm4, %ymm6, %ymm4
; AVX512VL-NEXT: vpsllw $2, %ymm0, %ymm7
; AVX512VL-NEXT: vpand %ymm6, %ymm7, %ymm7
; AVX512VL-NEXT: vpor %ymm4, %ymm7, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $7, %ymm0, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm8 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm8, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm10
; AVX512VL-NEXT: vpor %ymm4, %ymm10, %ymm4
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpaddb %ymm0, %ymm0, %ymm8
; AVX512VL-NEXT: vpor %ymm4, %ymm8, %ymm4
; AVX512VL-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512VL-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm5, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512VL-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $6, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm6, %ymm2
; AVX512VL-NEXT: vpsllw $2, %ymm1, %ymm4
; AVX512VL-NEXT: vpand %ymm9, %ymm4, %ymm4
; AVX512VL-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512VL-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: vpsrlw $7, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm8, %ymm2, %ymm2
; AVX512VL-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512VL-NEXT: vpaddb %ymm1, %ymm1, %ymm4
; AVX512VL-NEXT: vpor %ymm2, %ymm4, %ymm2
; AVX512VL-NEXT: vpaddb %ymm3, %ymm3, %ymm3
@@ -767,32 +763,30 @@ define <64 x i8> @splatconstant_rotate_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
@@ -907,38 +901,36 @@ define <64 x i8> @splatconstant_rotate_mask_v64i8(<64 x i8> %a) nounwind {
; AVX512F-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512F-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512F-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512F-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512F-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm5
; AVX512F-NEXT: vpand %ymm3, %ymm5, %ymm3
; AVX512F-NEXT: vpsrlw $4, %ymm1, %ymm4
; AVX512F-NEXT: vpandn %ymm4, %ymm3, %ymm4
; AVX512F-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512F-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512F-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: splatconstant_rotate_mask_v64i8:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpandn %ymm2, %ymm3, %ymm2
; AVX512VL-NEXT: vpsllw $4, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240,240]
; AVX512VL-NEXT: vpand %ymm4, %ymm0, %ymm0
; AVX512VL-NEXT: vpand %ymm3, %ymm0, %ymm0
; AVX512VL-NEXT: vpor %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm2 = [39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39,39]
; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm5
; AVX512VL-NEXT: vpand %ymm3, %ymm5, %ymm3
; AVX512VL-NEXT: vpsrlw $4, %ymm1, %ymm4
; AVX512VL-NEXT: vpandn %ymm4, %ymm3, %ymm4
; AVX512VL-NEXT: vpsllw $4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm3, %ymm1, %ymm1
; AVX512VL-NEXT: vpor %ymm4, %ymm1, %ymm1
; AVX512VL-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512VL-NEXT: retq
;
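The rotate_mask variants above are the same rotate by 4 followed by one extra AND with the function's mask constant, 39 per byte in the v64i8 checks (the v32i8 version folds it as a rip-relative vandps); a scalar model (illustrative):

    #include <cstdint>

    uint8_t rotl4Masked(uint8_t B) {
      uint8_t R = uint8_t((B << 4) | (B >> 4)); // the vpand/vpandn/vpor block
      return R & 39;                            // trailing vpand with [39,39,...]
    }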