[X86] combineAndnp - if an input has a zero (after inversion for Op0) in a vector element, then we don't demand that bit/element in the other input
Similar to what we already perform in combineAnd
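
To make the identity behind the fold concrete, here is a minimal standalone sketch (illustrative values only, not part of the patch): ANDNP computes ~Op0 & Op1 per element, so a zero element in Op1, or an all-ones element in Op0 (whose inversion is zero), forces that result element to zero regardless of the other operand.

    #include <cassert>
    #include <cstdint>

    // Scalar model of one lane of X86ISD::ANDNP: result = ~op0 & op1.
    static uint32_t andnp_lane(uint32_t op0, uint32_t op1) { return ~op0 & op1; }

    int main() {
      // A zero lane in op1 zeroes the result, so op0's lane is not demanded.
      assert(andnp_lane(0xDEADBEEF, 0x00000000) == 0);
      // An all-ones lane in op0 inverts to zero, so op1's lane is not demanded.
      assert(andnp_lane(0xFFFFFFFF, 0x12345678) == 0);
      // For a constant op1, only its set bits can reach the result, so only
      // those bits of op0 are demanded.
      assert((andnp_lane(0x0000FFFF, 0x00FF00FF) & ~0x00FF00FFu) == 0);
      return 0;
    }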
parent bee4531bee
commit a4ed0c2f03
@@ -50362,6 +50362,41 @@ static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
    SDValue Op(N, 0);
    if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
      return Res;

    // If either operand is a constant mask, then only the elements that aren't
    // zero are actually demanded by the other operand.
    auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
      APInt UndefElts;
      SmallVector<APInt> EltBits;
      int NumElts = VT.getVectorNumElements();
      int EltSizeInBits = VT.getScalarSizeInBits();
      APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
      APInt DemandedElts = APInt::getAllOnes(NumElts);
      if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
                                        EltBits)) {
        DemandedBits.clearAllBits();
        DemandedElts.clearAllBits();
        for (int I = 0; I != NumElts; ++I)
          if ((Invert && !EltBits[I].isAllOnes()) ||
              (!Invert && !EltBits[I].isZero())) {
            DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
            DemandedElts.setBit(I);
          }
      }
      return std::make_pair(DemandedBits, DemandedElts);
    };
    std::pair<APInt, APInt> Demand0 = GetDemandedMasks(N1);
    std::pair<APInt, APInt> Demand1 = GetDemandedMasks(N0, true);

    const TargetLowering &TLI = DAG.getTargetLoweringInfo();
    if (TLI.SimplifyDemandedVectorElts(N0, Demand0.second, DCI) ||
        TLI.SimplifyDemandedVectorElts(N1, Demand1.second, DCI) ||
        TLI.SimplifyDemandedBits(N0, Demand0.first, Demand0.second, DCI) ||
        TLI.SimplifyDemandedBits(N1, Demand1.first, Demand1.second, DCI)) {
      if (N->getOpcode() != ISD::DELETED_NODE)
        DCI.AddToWorklist(N);
      return SDValue(N, 0);
    }
  }

  return SDValue();
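
As a hand-worked illustration of what GetDemandedMasks computes for a constant, non-inverted operand (the values below are invented for the example; the real code operates on APInt constants extracted from the DAG):

    #include <cstdint>
    #include <cstdio>

    // Models the Invert == false path of GetDemandedMasks for a hypothetical
    // v4i32 constant operand: a lane demands anything from the other operand
    // only if the constant lane is non-zero, and the demanded bits are the
    // union of the non-zero lanes' bits.
    int main() {
      const uint32_t EltBits[4] = {0, 0x00FF00FFu, 0, 0x000000FFu};
      uint32_t DemandedBits = 0;
      unsigned DemandedElts = 0; // one bit per lane
      for (int I = 0; I != 4; ++I)
        if (EltBits[I] != 0) {
          DemandedBits |= EltBits[I];
          DemandedElts |= 1u << I;
        }
      // Prints DemandedBits=0x00FF00FF DemandedElts=0xA: lanes 0 and 2 of the
      // other operand, and the clear bits of lanes 1 and 3, may be simplified.
      std::printf("DemandedBits=0x%08X DemandedElts=0x%X\n",
                  (unsigned)DemandedBits, DemandedElts);
      return 0;
    }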
@@ -668,7 +668,6 @@ define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; KNL-NEXT: vmovd %edi, %xmm0
; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
; KNL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; KNL-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; KNL-NEXT: retq
;
@@ -638,10 +638,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: psrlw $8, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: psrlw $7, %xmm0
; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
; SSE2-NEXT: psrlw $15, %xmm0
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
@@ -956,8 +956,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE2-NEXT: pandn %xmm3, %xmm6
; SSE2-NEXT: por %xmm2, %xmm6
; SSE2-NEXT: movups %xmm1, (%rsi)
@@ -240,8 +240,7 @@ define void @vf8(<24 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: movaps %xmm1, (%rsi)
@@ -401,8 +400,7 @@ define void @vf16(<48 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: por %xmm1, %xmm4
@@ -415,8 +413,7 @@ define void @vf16(<48 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: movaps %xmm13, 16(%rsi)
@@ -549,7 +546,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movdqa 96(%rdi), %xmm11
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 176(%rdi), %xmm7
; SSE-NEXT: movdqa 144(%rdi), %xmm8
; SSE-NEXT: movdqa 144(%rdi), %xmm9
; SSE-NEXT: movdqa 160(%rdi), %xmm5
; SSE-NEXT: movdqa 80(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -557,12 +554,12 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movdqa 16(%rdi), %xmm10
; SSE-NEXT: movdqa 32(%rdi), %xmm13
; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm9
; SSE-NEXT: movdqa 48(%rdi), %xmm8
; SSE-NEXT: movdqa 64(%rdi), %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,0]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm6, %xmm2
; SSE-NEXT: movdqa %xmm9, %xmm3
; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
@@ -579,7 +576,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: movdqa %xmm9, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
@@ -631,11 +628,11 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm3, %xmm5
; SSE-NEXT: pandn %xmm9, %xmm5
; SSE-NEXT: pandn %xmm8, %xmm5
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pandn %xmm15, %xmm1
; SSE-NEXT: pand %xmm3, %xmm9
; SSE-NEXT: por %xmm1, %xmm9
; SSE-NEXT: pand %xmm3, %xmm8
; SSE-NEXT: por %xmm1, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
@@ -643,7 +640,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
; SSE-NEXT: movdqa %xmm1, %xmm10
; SSE-NEXT: pandn %xmm2, %xmm10
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
@@ -651,31 +648,31 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm10
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: pandn %xmm8, %xmm4
; SSE-NEXT: movdqa %xmm8, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm8
; SSE-NEXT: pandn %xmm12, %xmm8
; SSE-NEXT: pandn %xmm9, %xmm4
; SSE-NEXT: movdqa %xmm9, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm9
; SSE-NEXT: pandn %xmm12, %xmm9
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm8, %xmm0
; SSE-NEXT: por %xmm9, %xmm0
; SSE-NEXT: movdqa %xmm7, %xmm13
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
; SSE-NEXT: movdqa %xmm1, %xmm8
; SSE-NEXT: pandn %xmm7, %xmm8
; SSE-NEXT: movdqa %xmm1, %xmm9
; SSE-NEXT: pandn %xmm7, %xmm9
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm8
; SSE-NEXT: por %xmm0, %xmm9
; SSE-NEXT: movdqa %xmm3, %xmm7
; SSE-NEXT: pandn %xmm14, %xmm7
; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm14
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
; SSE-NEXT: pandn %xmm9, %xmm14
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
; SSE-NEXT: pandn %xmm8, %xmm14
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm14, %xmm0
; SSE-NEXT: pshuflw $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
@@ -715,8 +712,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: por %xmm5, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm15[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,1,2,0]
@@ -730,8 +726,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,1,2,0]
@@ -740,15 +735,14 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: pand %xmm3, %xmm9
; SSE-NEXT: por %xmm7, %xmm9
; SSE-NEXT: pand %xmm3, %xmm8
; SSE-NEXT: por %xmm7, %xmm8
; SSE-NEXT: pshufhw $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[3,1,2,0]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[3,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
@@ -764,8 +758,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,4,7,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
@@ -778,7 +771,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movaps %xmm2, 16(%rsi)
; SSE-NEXT: movdqa %xmm0, 32(%rdx)
; SSE-NEXT: movdqa %xmm14, (%rdx)
; SSE-NEXT: movdqa %xmm8, 48(%rdx)
; SSE-NEXT: movdqa %xmm9, 48(%rdx)
; SSE-NEXT: movdqa %xmm10, 16(%rdx)
; SSE-NEXT: movdqa %xmm1, 32(%rcx)
; SSE-NEXT: movdqa %xmm6, (%rcx)
File diff suppressed because it is too large
@@ -481,7 +481,7 @@ define void @load_i8_stride6_vf8(<48 x i8>* %in.vec, <8 x i8>* %out.vec0, <8 x i
; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
@@ -894,8 +894,7 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,0]
; SSE-NEXT: pand %xmm2, %xmm1
@@ -958,7 +957,7 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: packuswb %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm10, %xmm5
; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
@@ -1808,8 +1807,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm0, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3],xmm14[4],xmm10[4],xmm14[5],xmm10[5],xmm14[6],xmm10[6],xmm14[7],xmm10[7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
@@ -1853,8 +1851,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm2
@@ -1958,7 +1955,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: movdqa %xmm5, %xmm4
@@ -2006,7 +2003,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: packuswb %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,7,5,6,7]
@@ -821,7 +821,7 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3],xmm0[4,5,6,7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovaps {{.*#+}} ymm9 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
; AVX1-NEXT: vandnps %ymm0, %ymm9, %ymm0
@@ -873,7 +873,7 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
@@ -1686,7 +1686,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovaps {{.*#+}} ymm12 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
; AVX1-NEXT: vandnps %ymm0, %ymm12, %ymm2
@@ -1718,7 +1718,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3],xmm5[4,5,6,7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,2,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,1,3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vandnps %ymm5, %ymm12, %ymm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
@@ -1794,7 +1794,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm10[3],xmm0[4,5,6,7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vmovdqa (%rsi), %xmm8
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm8[3,3,3,3,4,5,6,7]
@@ -1846,7 +1846,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,2,4,5,6,7]
; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,1,3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,6]
@@ -233,22 +233,22 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm6
; SSE-NEXT: por %xmm3, %xmm6
; SSE-NEXT: por %xmm4, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: pand %xmm2, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,3,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,4,6,5]
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm7, %xmm4
; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,2]
@@ -264,9 +264,9 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm7, %xmm4
; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: movdqa %xmm2, %xmm3
; SSE-NEXT: pandn %xmm7, %xmm3
; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
@@ -278,14 +278,13 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,0,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, 32(%rcx)
; SSE-NEXT: movdqa %xmm4, (%rcx)
; SSE-NEXT: movdqa %xmm3, 16(%rcx)
; SSE-NEXT: movdqa %xmm3, (%rcx)
; SSE-NEXT: movdqa %xmm4, 16(%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i8_stride3_vf16:
@@ -403,8 +402,7 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,0,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
@@ -439,8 +437,7 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm13, %xmm7
; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,0,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pandn %xmm7, %xmm5