[X86] Don't emit *_extend_vector_inreg nodes when both the input and output types are legal with AVX1

We already have custom lowering for the AVX case in LegalizeVectorOps, so it's better to keep the regular extend op around as long as possible.
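Concretely, the combine that used to pre-split these extends now bails out whenever both vector types are legal, not just on AVX2. A minimal sketch of the new early-out (it mirrors the combineToExtendVectorInReg hunk below; VT is the result type, InVT the input type):

  // Keep the plain ISD::SIGN_EXTEND/ISD::ZERO_EXTEND node when both types
  // are legal; the custom LegalizeVectorOps lowering will split it later.
  // Previously this early-out also required Subtarget.hasInt256() (AVX2).
  if (DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
      DAG.getTargetLoweringInfo().isTypeLegal(InVT))
    return SDValue();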

I had to qualify one place in DAG combine that created illegal vector extending load operations. That change by itself had no effect on any tests, which is why it's included here.
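For illustration, the guarded fold sketched with its surrounding context (this mirrors the tryToFoldExtOfExtload hunk below; LN0 is the load node, VT the result type):

  // Without the VT.isVector() clause, the combine could fold e.g.
  // (v8i32 (zero_extend (v8i16 (load %p)))) into a single v8i32 zextload
  // before legalization (LegalOperations still false) without asking the
  // target whether that extending load is ever selectable.
  if ((LegalOperations || LN0->isVolatile() || VT.isVector()) &&
      !TLI.isLoadExtLegal(ExtLoadType, VT, MemVT))
    return {}; // give up rather than create an illegal vector extload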

I've made a few cleanups to the custom lowering. The sign extend code no longer creates an identity shuffle with undef elements, and the zero extend code now emits a zero_extend_vector_inreg for the lower half instead of an unpckl with a zero vector.
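As a scalar model of what the cleaned-up v8i16 -> v8i32 sign extend computes (a hypothetical standalone sketch, not LLVM code; the names in/lo/hi are made up):

  #include <cstdint>
  #include <cstdio>

  int main() {
    int16_t in[8] = {-1, 2, -3, 4, -5, 6, -7, 8};
    int32_t lo[4], hi[4];
    // Low half: sign_extend_vector_inreg (vpmovsxwd) reads the low four
    // elements of the unshuffled input directly, so the old identity
    // shuffle is gone.
    for (int i = 0; i != 4; ++i)
      lo[i] = in[i];
    // High half: shuffle elements {4,5,6,7} down, then vpmovsxwd again.
    for (int i = 0; i != 4; ++i)
      hi[i] = in[4 + i];
    for (int i = 0; i != 4; ++i)
      std::printf("lo[%d]=%d hi[%d]=%d\n", i, lo[i], i, hi[i]);
    return 0;
  }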

For the high half of the custom lowering of zero_extend/any_extend, we now use an unpckh with a zero vector or undef. Previously we used a pshufd to move the upper 64 bits to the lower 64 bits and then used a zero_extend_vector_inreg. I think the zero vector should require fewer execution resources and give smaller code size.
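To see why the unpckh form computes the same thing for the high half, here is a little-endian scalar model (again a hypothetical sketch, not LLVM code):

  #include <cstdint>
  #include <cstdio>
  #include <cstring>

  int main() {
    uint16_t in[8] = {1, 2, 3, 4, 0xFFFF, 6, 7, 8};
    // vpunpckhwd with a zeroed register interleaves the high four words
    // of the input with zero words...
    uint16_t unpckh[8];
    for (int i = 0; i != 4; ++i) {
      unpckh[2 * i] = in[4 + i]; // low word of each dword: input element
      unpckh[2 * i + 1] = 0;     // high word: from the zero register
    }
    // ...so bitcasting the result to v4i32 (on little-endian x86) yields
    // exactly the zero-extended high elements, with no vpshufd+vpmovzxwd.
    uint32_t hi[4];
    std::memcpy(hi, unpckh, sizeof(hi));
    for (int i = 0; i != 4; ++i)
      std::printf("hi[%d]=%u\n", i, hi[i]); // prints 65535 6 7 8
    return 0;
  }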

Differential Revision: https://reviews.llvm.org/D54024

llvm-svn: 346043
Craig Topper 2018-11-02 21:09:49 +00:00
parent 3095874d3c
commit 60c202a494
12 changed files with 333 additions and 366 deletions


@@ -8391,7 +8391,7 @@ static SDValue tryToFoldExtOfExtload(SelectionDAG &DAG, DAGCombiner &Combiner,
LoadSDNode *LN0 = cast<LoadSDNode>(N0);
EVT MemVT = LN0->getMemoryVT();
if ((LegalOperations || LN0->isVolatile()) &&
if ((LegalOperations || LN0->isVolatile() || VT.isVector()) &&
!TLI.isLoadExtLegal(ExtLoadType, VT, MemVT))
return {};


@@ -17446,27 +17446,26 @@ static SDValue LowerAVXExtend(SDValue Op, SelectionDAG &DAG,
// Optimize vectors in AVX mode:
//
// v8i16 -> v8i32
// Use vpunpcklwd for 4 lower elements v8i16 -> v4i32.
// Use vpmovzxwd for 4 lower elements v8i16 -> v4i32.
// Use vpunpckhwd for 4 upper elements v8i16 -> v4i32.
// Concat upper and lower parts.
//
// v4i32 -> v4i64
// Use vpunpckldq for 2 lower elements v4i32 -> v2i64.
// Use vpmovzxdq for 2 lower elements v4i32 -> v2i64.
// Use vpunpckhdq for 2 upper elements v4i32 -> v2i64.
// Concat upper and lower parts.
//
SDValue ZeroVec = getZeroVector(InVT, Subtarget, DAG, dl);
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
SDValue OpLo = DAG.getZeroExtendVectorInReg(In, dl, HalfVT);
SDValue ZeroVec = DAG.getConstant(0, dl, InVT);
SDValue Undef = DAG.getUNDEF(InVT);
bool NeedZero = Op.getOpcode() == ISD::ZERO_EXTEND;
SDValue OpLo = getUnpackl(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
SDValue OpHi = getUnpackh(DAG, dl, InVT, In, NeedZero ? ZeroVec : Undef);
MVT HVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements()/2);
OpLo = DAG.getBitcast(HVT, OpLo);
OpHi = DAG.getBitcast(HVT, OpHi);
OpHi = DAG.getBitcast(HalfVT, OpHi);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
}
@@ -19878,29 +19877,21 @@ static SDValue LowerSIGN_EXTEND(SDValue Op, const X86Subtarget &Subtarget,
// v4i32 to v4i64
//
// Divide input vector into two parts
// for v4i32 the shuffle mask will be { 0, 1, -1, -1} {2, 3, -1, -1}
// for v4i32 the high shuffle mask will be {2, 3, -1, -1}
// use vpmovsx instruction to extend v4i32 -> v2i64; v8i16 -> v4i32
// concat the vectors to original VT
unsigned NumElems = InVT.getVectorNumElements();
SDValue Undef = DAG.getUNDEF(InVT);
SmallVector<int,8> ShufMask1(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask1[i] = i;
SDValue OpLo = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask1);
SmallVector<int,8> ShufMask2(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask2[i] = i + NumElems/2;
SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, Undef, ShufMask2);
MVT HalfVT = MVT::getVectorVT(VT.getVectorElementType(),
VT.getVectorNumElements() / 2);
OpLo = DAG.getSignExtendVectorInReg(OpLo, dl, HalfVT);
SDValue OpLo = DAG.getSignExtendVectorInReg(In, dl, HalfVT);
unsigned NumElems = InVT.getVectorNumElements();
SmallVector<int,8> ShufMask(NumElems, -1);
for (unsigned i = 0; i != NumElems/2; ++i)
ShufMask[i] = i + NumElems/2;
SDValue OpHi = DAG.getVectorShuffle(InVT, dl, In, In, ShufMask);
OpHi = DAG.getSignExtendVectorInReg(OpHi, dl, HalfVT);
return DAG.getNode(ISD::CONCAT_VECTORS, dl, VT, OpLo, OpHi);
@@ -38323,7 +38314,7 @@ static SDValue combineToExtendVectorInReg(SDNode *N, SelectionDAG &DAG,
// On AVX2+ targets, if the input/output types are both legal then we will be
// able to use SIGN_EXTEND/ZERO_EXTEND directly.
if (Subtarget.hasInt256() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
if (DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
DAG.getTargetLoweringInfo().isTypeLegal(InVT))
return SDValue();


@@ -2142,243 +2142,231 @@ define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind
; AVX1-NEXT: pushq %r12
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: subq $24, %rsp
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm4[0],zero,xmm4[1],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm4 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm3 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm6, %rdi
; AVX1-NEXT: vmovq %xmm6, %rbp
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
; AVX1-NEXT: vpextrq $1, %xmm5, %rbx
; AVX1-NEXT: vmovq %xmm5, %rbp
; AVX1-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, %rsi
; AVX1-NEXT: vmovq %xmm4, %rcx
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, %r8
; AVX1-NEXT: vmovq %xmm4, %r11
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm3, %r13
; AVX1-NEXT: vmovq %xmm3, %r12
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, %r15
; AVX1-NEXT: vmovq %xmm4, %rdi
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: vmovq %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpextrq $1, %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: vmovq %xmm3, %r10
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, %rdx
; AVX1-NEXT: addq %rbx, %rdx
; AVX1-NEXT: vmovq %xmm4, %r9
; AVX1-NEXT: addq %rbp, %r9
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm3, %rax
; AVX1-NEXT: addq %rsi, %rax
; AVX1-NEXT: movq %rax, %r14
; AVX1-NEXT: vmovq %xmm3, %rbp
; AVX1-NEXT: addq %rcx, %rbp
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vmovq %xmm5, %rsi
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero,xmm4[2],zero,xmm4[3],zero
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm5, %rdx
; AVX1-NEXT: vmovq %xmm5, %rcx
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm6 = xmm5[2],xmm1[2],xmm5[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm6, %r13
; AVX1-NEXT: vmovq %xmm6, %r12
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm5 = xmm5[0],zero,xmm5[1],zero
; AVX1-NEXT: vpextrq $1, %xmm5, %r11
; AVX1-NEXT: vmovq %xmm5, %r14
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpextrq $1, %xmm3, %rsi
; AVX1-NEXT: addq %r8, %rsi
; AVX1-NEXT: vmovq %xmm3, %rax
; AVX1-NEXT: addq %r11, %rax
; AVX1-NEXT: movq %rax, %r11
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm5, %r9
; AVX1-NEXT: vmovq %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: vmovq %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: addq %r13, %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: addq %r12, %rax
; AVX1-NEXT: movq %rax, %r8
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpextrq $1, %xmm3, %rax
; AVX1-NEXT: addq %r15, %rax
; AVX1-NEXT: movq %rax, %rbx
; AVX1-NEXT: vmovq %xmm3, %rax
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm5, %rax
; AVX1-NEXT: addq %rdi, %rax
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: vmovq %xmm5, %rax
; AVX1-NEXT: addq %rbp, %rax
; AVX1-NEXT: movq %rax, %rbp
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, %r15
; AVX1-NEXT: addq %rbx, %r15
; AVX1-NEXT: vmovq %xmm4, %r10
; AVX1-NEXT: addq %rsi, %r10
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm4 = xmm3[2],xmm1[2],xmm3[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: addq %rdx, %rax
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: vmovq %xmm4, %r8
; AVX1-NEXT: addq %rcx, %r8
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm5 = xmm4[2],xmm1[2],xmm4[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm5, %rcx
; AVX1-NEXT: addq %r13, %rcx
; AVX1-NEXT: vmovq %xmm5, %rax
; AVX1-NEXT: addq %r12, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
; AVX1-NEXT: vpextrq $1, %xmm4, %rax
; AVX1-NEXT: addq %r11, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX1-NEXT: vmovq %xmm4, %rax
; AVX1-NEXT: addq %r14, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: addq %r9, %rax
; AVX1-NEXT: movq %rax, %r13
; AVX1-NEXT: vmovq %xmm1, %rbx
; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Folded Reload
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: vmovq %xmm2, %r12
; AVX1-NEXT: addq %r10, %r12
; AVX1-NEXT: vpextrq $1, %xmm1, %rax
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpextrq $1, %xmm0, %r10
; AVX1-NEXT: addq %rax, %r10
; AVX1-NEXT: vmovq %xmm1, %rax
; AVX1-NEXT: vmovq %xmm0, %rdi
; AVX1-NEXT: addq %rax, %rdi
; AVX1-NEXT: addq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Folded Reload
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: vpextrq $1, %xmm2, %rax
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpextrq $1, %xmm0, %rsi
; AVX1-NEXT: addq %rax, %rsi
; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: vmovq %xmm2, %rax
; AVX1-NEXT: vmovq %xmm0, %rsi
; AVX1-NEXT: addq %rax, %rsi
; AVX1-NEXT: addq $-1, %rdi
; AVX1-NEXT: movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %rbp
; AVX1-NEXT: movq %rbp, (%rsp) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %r15
; AVX1-NEXT: movq %r15, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %r10
; AVX1-NEXT: movq %r10, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %rdx
; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %r9
; AVX1-NEXT: movq %r9, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %r14
; AVX1-NEXT: movq %r14, (%rsp) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %rbp
; AVX1-NEXT: movq %rbp, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %rsi
; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %r11
; AVX1-NEXT: movq %r11, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: addq $-1, %rcx
; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %ebp
; AVX1-NEXT: adcq $-1, %rbp
; AVX1-NEXT: addq $-1, %r8
; AVX1-NEXT: movq %r8, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %r15d
; AVX1-NEXT: adcq $-1, %r15
; AVX1-NEXT: addq $-1, %rbx
; AVX1-NEXT: movq %rbx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %r12d
; AVX1-NEXT: adcq $-1, %r12
; AVX1-NEXT: addq $-1, %rcx
; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movl $0, %eax
; AVX1-NEXT: adcq $-1, %rax
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %r13d
; AVX1-NEXT: adcq $-1, %r13
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %r14d
; AVX1-NEXT: adcq $-1, %r14
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX1-NEXT: addq $-1, %rdx
; AVX1-NEXT: movl $0, %r11d
; AVX1-NEXT: adcq $-1, %r11
; AVX1-NEXT: movq %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: addq $-1, %rax
; AVX1-NEXT: movl $0, %ebx
; AVX1-NEXT: adcq $-1, %rbx
; AVX1-NEXT: addq $-1, %r12
; AVX1-NEXT: movl $0, %r9d
; AVX1-NEXT: adcq $-1, %r9
; AVX1-NEXT: addq $-1, %r10
; AVX1-NEXT: movl $0, %r8d
; AVX1-NEXT: adcq $-1, %r8
; AVX1-NEXT: addq $-1, %rdi
; AVX1-NEXT: movl $0, %ecx
; AVX1-NEXT: adcq $-1, %rcx
; AVX1-NEXT: shldq $63, %rdi, %rcx
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %edx
; AVX1-NEXT: adcq $-1, %rdx
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %r15d
; AVX1-NEXT: adcq $-1, %r15
; AVX1-NEXT: addq $-1, %r13
; AVX1-NEXT: movl $0, %r14d
; AVX1-NEXT: adcq $-1, %r14
; AVX1-NEXT: addq $-1, %rbx
; AVX1-NEXT: movl $0, %r11d
; AVX1-NEXT: adcq $-1, %r11
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %r8d
; AVX1-NEXT: adcq $-1, %r8
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %edi
; AVX1-NEXT: adcq $-1, %rdi
; AVX1-NEXT: addq $-1, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movl $0, %r10d
; AVX1-NEXT: adcq $-1, %r10
; AVX1-NEXT: movq %rsi, %rbp
; AVX1-NEXT: addq $-1, %rbp
; AVX1-NEXT: movl $0, %r9d
; AVX1-NEXT: adcq $-1, %r9
; AVX1-NEXT: shldq $63, %rbx, %r11
; AVX1-NEXT: shldq $63, %r13, %r14
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; AVX1-NEXT: shldq $63, %rbx, %r15
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; AVX1-NEXT: shldq $63, %rbx, %rdx
; AVX1-NEXT: movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: shldq $63, %rax, %rcx
; AVX1-NEXT: movq %rcx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: shldq $63, %r10, %r8
; AVX1-NEXT: shldq $63, %r12, %r9
; AVX1-NEXT: shldq $63, %rax, %rbx
; AVX1-NEXT: shldq $63, %rdx, %r11
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX1-NEXT: shldq $63, %rdx, %r14
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX1-NEXT: shldq $63, %rdx, %r13
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %rsi
; AVX1-NEXT: movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; AVX1-NEXT: shldq $63, %rax, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Folded Spill
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %r15
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %rbp
; AVX1-NEXT: shldq $63, %rax, %r12
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %rsi
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %rdx
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rcx # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %rcx
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %rdi
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r12 # 8-byte Reload
; AVX1-NEXT: movq (%rsp), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %r12
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r10 # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rax, %r10
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rax # 8-byte Reload
; AVX1-NEXT: shldq $63, %rdx, %rax
; AVX1-NEXT: vmovq %rax, %xmm8
; AVX1-NEXT: vmovq %r10, %xmm0
; AVX1-NEXT: vmovq %r12, %xmm1
; AVX1-NEXT: vmovq %rdi, %xmm11
; AVX1-NEXT: vmovq %rcx, %xmm2
; AVX1-NEXT: vmovq %rsi, %xmm13
; AVX1-NEXT: vmovq %rbp, %xmm14
; AVX1-NEXT: vmovq %r15, %xmm15
; AVX1-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 8-byte Folded Reload
; AVX1-NEXT: movq (%rsp), %rbx # 8-byte Reload
; AVX1-NEXT: shldq $63, %rbx, %rax
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbx # 8-byte Reload
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %r13 # 8-byte Reload
; AVX1-NEXT: shldq $63, %r13, %rbx
; AVX1-NEXT: shldq $63, %rbp, %r9
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; AVX1-NEXT: shldq $63, %rbp, %r10
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; AVX1-NEXT: shldq $63, %rbp, %rdi
; AVX1-NEXT: movq {{[-0-9]+}}(%r{{[sb]}}p), %rbp # 8-byte Reload
; AVX1-NEXT: shldq $63, %rbp, %r8
; AVX1-NEXT: vmovq %rbx, %xmm8
; AVX1-NEXT: vmovq %rax, %xmm0
; AVX1-NEXT: vmovq %rcx, %xmm1
; AVX1-NEXT: vmovq %rdx, %xmm11
; AVX1-NEXT: vmovq %rsi, %xmm2
; AVX1-NEXT: vmovq %r12, %xmm13
; AVX1-NEXT: vmovq %r8, %xmm14
; AVX1-NEXT: vmovq %rdi, %xmm15
; AVX1-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 8-byte Reload
; AVX1-NEXT: # xmm9 = mem[0],zero
; AVX1-NEXT: vmovq %r13, %xmm10
; AVX1-NEXT: vmovq %r14, %xmm12
; AVX1-NEXT: vmovq %r11, %xmm3
; AVX1-NEXT: vmovq %rbx, %xmm4
; AVX1-NEXT: vmovq %r9, %xmm5
; AVX1-NEXT: vmovq %r8, %xmm6
; AVX1-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 8-byte Folded Reload
; AVX1-NEXT: # xmm7 = mem[0],zero
; AVX1-NEXT: vmovsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 8-byte Reload
; AVX1-NEXT: # xmm10 = mem[0],zero
; AVX1-NEXT: vmovq {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 8-byte Folded Reload
; AVX1-NEXT: # xmm12 = mem[0],zero
; AVX1-NEXT: vmovq %r15, %xmm3
; AVX1-NEXT: vmovq %r14, %xmm4
; AVX1-NEXT: vmovq %r11, %xmm5
; AVX1-NEXT: vmovq %r10, %xmm6
; AVX1-NEXT: vmovq %r9, %xmm7
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm0[0],xmm8[0]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm11[0],xmm1[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,2],xmm0[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm8 = xmm0[0,2],xmm8[0,2]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm13[0],xmm2[0]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm15[0],xmm14[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm11 = xmm0[0,2],xmm1[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm11 = xmm1[0,2],xmm0[0,2]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm8, %xmm0
; AVX1-NEXT: vpshufb %xmm1, %xmm11, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm10[0],xmm9[0]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm2[0],xmm0[0]
; AVX1-NEXT: vmovlhps {{.*#+}} xmm2 = xmm10[0],xmm9[0]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm12[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm3[0,2],xmm2[0,2]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm5[0],xmm4[0]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm6[0]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[0,2]
; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm5[0,2],xmm3[0,2]
; AVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX1-NEXT: vmovdqu %xmm0, (%rax)


@@ -93,15 +93,14 @@ define <8 x i32> @zext(<8 x float> %a, <8 x float> %b, <8 x i16> %c, <8 x i16> %
; AVX1-LABEL: zext:
; AVX1: # %bb.0:
; AVX1-NEXT: vcmpltps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; AVX1-NEXT: vxorps %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1]
; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2
; AVX1-NEXT: vblendvps %ymm0, %ymm1, %ymm2, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm3, %ymm1
; AVX1-NEXT: vblendvps %ymm0, %ymm2, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext:


@@ -2061,12 +2061,11 @@ define <4 x i32> @pmaddwd_negative1(<8 x i16> %A, <8 x i16> %B) {
;
; AVX1-LABEL: pmaddwd_negative1:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm1[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; AVX1-NEXT: vpmulld %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vphaddd %xmm2, %xmm0, %xmm0


@@ -547,25 +547,25 @@ define <8 x i16> @test13(<8 x i16> %x, <8 x i32> %y) nounwind {
;
; AVX1-LABEL: test13:
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpmaxud %xmm5, %xmm2, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm2, %xmm6
; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpackssdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpmaxud %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpmaxud %xmm1, %xmm0, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpandn %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpandn %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -930,25 +930,25 @@ define <8 x i16> @test15(<8 x i16> %x, <8 x i32> %y) nounwind {
;
; AVX1-LABEL: test15:
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm0, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpminud %xmm5, %xmm2, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm2, %xmm6
; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpackssdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpminud %xmm3, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm2, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpminud %xmm1, %xmm0, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm0, %xmm6
; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
@@ -1064,25 +1064,25 @@ define <8 x i16> @test16(<8 x i16> %x, <8 x i32> %y) nounwind {
;
; AVX1-LABEL: test16:
; AVX1: # %bb.0: # %vector.ph
; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpmaxud %xmm0, %xmm1, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm3, %xmm1, %xmm3
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm4, %xmm4
; AVX1-NEXT: vpxor %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpmaxud %xmm2, %xmm5, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm5, %xmm6
; AVX1-NEXT: vpxor %xmm4, %xmm6, %xmm4
; AVX1-NEXT: vpackssdw %xmm4, %xmm3, %xmm3
; AVX1-NEXT: vpsubd %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpmaxud %xmm2, %xmm3, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm4, %xmm3, %xmm4
; AVX1-NEXT: vpcmpeqd %xmm5, %xmm5, %xmm5
; AVX1-NEXT: vpxor %xmm5, %xmm4, %xmm4
; AVX1-NEXT: vpmaxud %xmm0, %xmm1, %xmm6
; AVX1-NEXT: vpcmpeqd %xmm6, %xmm1, %xmm6
; AVX1-NEXT: vpxor %xmm5, %xmm6, %xmm5
; AVX1-NEXT: vpackssdw %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpsubd %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpsubd %xmm3, %xmm2, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vpand %xmm0, %xmm3, %xmm0
; AVX1-NEXT: vpand %xmm0, %xmm4, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;


@@ -2316,8 +2316,8 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) {
; X86-AVX1-NEXT: .cfi_def_cfa_offset 16
; X86-AVX1-NEXT: pushl %esi
; X86-AVX1-NEXT: .cfi_def_cfa_offset 20
; X86-AVX1-NEXT: subl $16, %esp
; X86-AVX1-NEXT: .cfi_def_cfa_offset 36
; X86-AVX1-NEXT: subl $8, %esp
; X86-AVX1-NEXT: .cfi_def_cfa_offset 28
; X86-AVX1-NEXT: .cfi_offset %esi, -20
; X86-AVX1-NEXT: .cfi_offset %edi, -16
; X86-AVX1-NEXT: .cfi_offset %ebx, -12
@@ -2326,8 +2326,8 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) {
; X86-AVX1-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-AVX1-NEXT: vmovdqa (%eax), %ymm2
; X86-AVX1-NEXT: vmovdqa (%ecx), %ymm1
; X86-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X86-AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X86-AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; X86-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; X86-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
@@ -2339,50 +2339,50 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) {
; X86-AVX1-NEXT: vpextrd $3, %xmm3, %eax
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-AVX1-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; X86-AVX1-NEXT: vpextrd $2, %xmm3, %eax
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-AVX1-NEXT: movl %edx, %edi
; X86-AVX1-NEXT: vpextrd $1, %xmm1, %ecx
; X86-AVX1-NEXT: vpextrd $1, %xmm3, %eax
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: movl %edx, (%esp) # 4-byte Spill
; X86-AVX1-NEXT: movl %edx, %ebx
; X86-AVX1-NEXT: vmovd %xmm1, %ecx
; X86-AVX1-NEXT: vmovd %xmm3, %eax
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: movl %edx, %ebp
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; X86-AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: vpextrd $3, %xmm0, %eax
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: movl %edx, %ebx
; X86-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: movl %edx, %ecx
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: vpextrd $2, %xmm1, %esi
; X86-AVX1-NEXT: vpextrd $2, %xmm0, %eax
; X86-AVX1-NEXT: vpextrd $2, %xmm1, %esi
; X86-AVX1-NEXT: divl %esi
; X86-AVX1-NEXT: movl %edx, %esi
; X86-AVX1-NEXT: vmovd %ebp, %xmm2
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: vpextrd $1, %xmm1, %edi
; X86-AVX1-NEXT: vpextrd $1, %xmm0, %eax
; X86-AVX1-NEXT: divl %edi
; X86-AVX1-NEXT: movl %edx, %edi
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: vmovd %xmm1, %ecx
; X86-AVX1-NEXT: vpextrd $1, %xmm1, %ebp
; X86-AVX1-NEXT: divl %ebp
; X86-AVX1-NEXT: movl %edx, %ebp
; X86-AVX1-NEXT: vpinsrd $1, %ebx, %xmm2, %xmm2
; X86-AVX1-NEXT: vmovd %xmm0, %eax
; X86-AVX1-NEXT: divl %ecx
; X86-AVX1-NEXT: vmovd %edx, %xmm0
; X86-AVX1-NEXT: vpinsrd $1, %edi, %xmm0, %xmm0
; X86-AVX1-NEXT: vpinsrd $2, %esi, %xmm0, %xmm0
; X86-AVX1-NEXT: vpinsrd $3, %ebx, %xmm0, %xmm0
; X86-AVX1-NEXT: vmovd %ebp, %xmm1
; X86-AVX1-NEXT: vpinsrd $1, (%esp), %xmm1, %xmm1 # 4-byte Folded Reload
; X86-AVX1-NEXT: vpinsrd $2, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
; X86-AVX1-NEXT: vpinsrd $3, {{[-0-9]+}}(%e{{[sb]}}p), %xmm1, %xmm1 # 4-byte Folded Reload
; X86-AVX1-NEXT: vpinsrd $2, %edi, %xmm2, %xmm0
; X86-AVX1-NEXT: vmovd %xmm1, %edi
; X86-AVX1-NEXT: vpinsrd $3, (%esp), %xmm0, %xmm0 # 4-byte Folded Reload
; X86-AVX1-NEXT: xorl %edx, %edx
; X86-AVX1-NEXT: divl %edi
; X86-AVX1-NEXT: vmovd %edx, %xmm1
; X86-AVX1-NEXT: vpinsrd $1, %ebp, %xmm1, %xmm1
; X86-AVX1-NEXT: vpinsrd $2, %esi, %xmm1, %xmm1
; X86-AVX1-NEXT: vpinsrd $3, %ecx, %xmm1, %xmm1
; X86-AVX1-NEXT: vmovd {{[-0-9]+}}(%e{{[sb]}}p), %xmm2 # 4-byte Folded Reload
; X86-AVX1-NEXT: # xmm2 = mem[0],zero,zero,zero
; X86-AVX1-NEXT: movl $8199, %eax # imm = 0x2007
@@ -2390,11 +2390,11 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) {
; X86-AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [8199,8199,8199,8199]
; X86-AVX1-NEXT: vpmulld %xmm4, %xmm0, %xmm0
; X86-AVX1-NEXT: vpmulld %xmm4, %xmm1, %xmm1
; X86-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X86-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X86-AVX1-NEXT: vpmulld %xmm3, %xmm2, %xmm1
; X86-AVX1-NEXT: vmovd %xmm1, (%eax)
; X86-AVX1-NEXT: vmovaps %ymm0, (%eax)
; X86-AVX1-NEXT: addl $16, %esp
; X86-AVX1-NEXT: addl $8, %esp
; X86-AVX1-NEXT: .cfi_def_cfa_offset 20
; X86-AVX1-NEXT: popl %esi
; X86-AVX1-NEXT: .cfi_def_cfa_offset 16
@@ -2589,8 +2589,8 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) {
; X64-AVX1-NEXT: .cfi_offset %rbp, -16
; X64-AVX1-NEXT: vmovdqa (%rdi), %ymm2
; X64-AVX1-NEXT: vmovdqa (%rsi), %ymm1
; X64-AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; X64-AVX1-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; X64-AVX1-NEXT: vextractf128 $1, %ymm2, %xmm2
; X64-AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
@@ -2618,38 +2618,38 @@ define void @PR34947(<9 x i16>* %p0, <9 x i32>* %p1) {
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %esi
; X64-AVX1-NEXT: vpextrd $3, %xmm0, %eax
; X64-AVX1-NEXT: vextractf128 $1, %ymm1, %xmm1
; X64-AVX1-NEXT: vpextrd $3, %xmm1, %ecx
; X64-AVX1-NEXT: vpextrd $3, %xmm0, %eax
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %edi
; X64-AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; X64-AVX1-NEXT: vpextrd $2, %xmm0, %eax
; X64-AVX1-NEXT: vpextrd $2, %xmm1, %ecx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ecx
; X64-AVX1-NEXT: movl %edx, %ecx
; X64-AVX1-NEXT: vpextrd $1, %xmm1, %ebx
; X64-AVX1-NEXT: vpextrd $1, %xmm0, %eax
; X64-AVX1-NEXT: vpextrd $1, %xmm1, %ebx
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ebx
; X64-AVX1-NEXT: movl %edx, %ebx
; X64-AVX1-NEXT: vmovd %xmm1, %ebp
; X64-AVX1-NEXT: vmovd %xmm0, %eax
; X64-AVX1-NEXT: vmovd %xmm1, %ebp
; X64-AVX1-NEXT: xorl %edx, %edx
; X64-AVX1-NEXT: divl %ebp
; X64-AVX1-NEXT: vmovd %edx, %xmm0
; X64-AVX1-NEXT: vpinsrd $1, %ebx, %xmm0, %xmm0
; X64-AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
; X64-AVX1-NEXT: vpinsrd $3, %edi, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %esi, %xmm0
; X64-AVX1-NEXT: vpinsrd $1, %r11d, %xmm0, %xmm0
; X64-AVX1-NEXT: vpinsrd $2, %r10d, %xmm0, %xmm0
; X64-AVX1-NEXT: vpinsrd $3, %r9d, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [8199,8199,8199,8199]
; X64-AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
; X64-AVX1-NEXT: vmovd %esi, %xmm2
; X64-AVX1-NEXT: vpinsrd $1, %r11d, %xmm2, %xmm2
; X64-AVX1-NEXT: vpinsrd $2, %r10d, %xmm2, %xmm2
; X64-AVX1-NEXT: vpinsrd $3, %r9d, %xmm2, %xmm2
; X64-AVX1-NEXT: vmovd %edx, %xmm2
; X64-AVX1-NEXT: vpinsrd $1, %ebx, %xmm2, %xmm2
; X64-AVX1-NEXT: vpinsrd $2, %ecx, %xmm2, %xmm2
; X64-AVX1-NEXT: vpinsrd $3, %edi, %xmm2, %xmm2
; X64-AVX1-NEXT: vpmulld %xmm1, %xmm2, %xmm1
; X64-AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-AVX1-NEXT: vmovd %r8d, %xmm1
; X64-AVX1-NEXT: movl $8199, %eax # imm = 0x2007
; X64-AVX1-NEXT: vmovd %eax, %xmm2


@@ -131,12 +131,8 @@ define <8 x i32> @and_mask_constant(<8 x i32> %v0, <8 x i32> %v1) {
; X32-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X32-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; X32-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; X32-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; X32-NEXT: vpand LCPI2_0, %xmm0, %xmm0
; X32-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X32-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X32-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X32-NEXT: vandps LCPI2_0, %ymm0, %ymm0
; X32-NEXT: retl
;
; X64-LABEL: and_mask_constant:
@@ -145,12 +141,8 @@ define <8 x i32> @and_mask_constant(<8 x i32> %v0, <8 x i32> %v1) {
; X64-NEXT: vpxor %xmm2, %xmm2, %xmm2
; X64-NEXT: vpcmpeqd %xmm2, %xmm1, %xmm1
; X64-NEXT: vpcmpeqd %xmm2, %xmm0, %xmm0
; X64-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X64-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; X64-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; X64-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; X64-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT: retq
;
; X32-AVX2-LABEL: and_mask_constant:


@@ -87,10 +87,10 @@ define <8 x float> @cvt_v8u8_v8f32(<8 x i8> %src) {
; CHECK-LABEL: cvt_v8u8_v8f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpand LCPI4_0, %xmm0, %xmm0
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT: retl
;
@@ -109,19 +109,19 @@ define <8 x float> @cvt_v8u8_v8f32(<8 x i8> %src) {
define <8 x float> @cvt_v8u16_v8f32(<8 x i16> %src) {
; CHECK-LABEL: cvt_v8u16_v8f32:
; CHECK: ## %bb.0:
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT: vcvtdq2ps %ymm0, %ymm0
; CHECK-NEXT: retl
;
; CHECK-WIDE-LABEL: cvt_v8u16_v8f32:
; CHECK-WIDE: ## %bb.0:
; CHECK-WIDE-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-WIDE-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; CHECK-WIDE-NEXT: vpxor %xmm1, %xmm1, %xmm1
; CHECK-WIDE-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; CHECK-WIDE-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; CHECK-WIDE-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-WIDE-NEXT: vcvtdq2ps %ymm0, %ymm0
; CHECK-WIDE-NEXT: retl
%res = uitofp <8 x i16> %src to <8 x float>


@@ -2413,10 +2413,10 @@ define <4 x float> @uitofp_8i16_to_4f32(<8 x i16> %a) {
;
; AVX1-LABEL: uitofp_8i16_to_4f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX1-NEXT: vzeroupper
@@ -2952,10 +2952,10 @@ define <8 x float> @uitofp_8i16_to_8f32(<8 x i16> %a) {
;
; AVX1-LABEL: uitofp_8i16_to_8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: retq
;
@@ -5729,10 +5729,8 @@ define void @aggregate_sitofp_8i16_to_8f32(%Arguments* nocapture readonly %a0) {
; AVX1-LABEL: aggregate_sitofp_8i16_to_8f32:
; AVX1: # %bb.0:
; AVX1-NEXT: movq 24(%rdi), %rax
; AVX1-NEXT: vmovdqu 8(%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT: vpmovsxwd 16(%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vcvtdq2ps %ymm0, %ymm0
; AVX1-NEXT: vmovaps %ymm0, (%rax)


@@ -464,10 +464,10 @@ define <8 x i32> @cmpne_knownzeros_zext_v8i16_v8i32(<8 x i16> %x) {
; AVX1-LABEL: cmpne_knownzeros_zext_v8i16_v8i32:
; AVX1: # %bb.0:
; AVX1-NEXT: vpsrlw $15, %xmm0, %xmm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: cmpne_knownzeros_zext_v8i16_v8i32:


@@ -64,10 +64,10 @@ define <16 x i16> @zext_16i8_to_16i16(<16 x i8> %A) {
;
; AVX1-LABEL: zext_16i8_to_16i16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_16i8_to_16i16:
@@ -526,10 +526,10 @@ define <8 x i32> @zext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp
;
; AVX1-LABEL: zext_8i16_to_8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i16_to_8i32:
@@ -825,10 +825,10 @@ define <4 x i64> @zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp
;
; AVX1-LABEL: zext_4i32_to_4i64:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhdq {{.*#+}} xmm1 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_4i32_to_4i64:
@@ -1540,10 +1540,10 @@ define <8 x i32> @zext_8i8_to_8i32(<8 x i8> %z) {
; AVX1-LABEL: zext_8i8_to_8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: zext_8i8_to_8i32: