[X86][SSE] Add initial support for combining (V)PMOVZX with shuffles.
llvm-svn: 288049
commit 2228f70a85
parent a415a9bde6
|
@ -5509,6 +5509,15 @@ static bool getFauxShuffleMask(SDValue N, SmallVectorImpl<int> &Mask,
|
|||
}
|
||||
return true;
|
||||
}
|
||||
case X86ISD::VZEXT: {
|
||||
// TODO - add support for VPMOVZX with smaller input vector types.
|
||||
SDValue Op0 = N.getOperand(0);
|
||||
if (VT.getSizeInBits() != Op0.getValueSizeInBits())
|
||||
break;
|
||||
DecodeZeroExtendMask(Op0.getSimpleValueType().getScalarType(), VT, Mask);
|
||||
Ops.push_back(Op0);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return false;
|
||||
|
|
|
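For context on the new X86ISD::VZEXT case: the DecodeZeroExtendMask call above models a vector zero-extend as a shuffle mask over the source elements, with sentinel entries for the lanes that must become zero, which is what lets the shuffle combiner fold a (V)PMOVZX into surrounding shuffles. The standalone C++ sketch below is an illustration only, not the LLVM implementation; the function name zeroExtendShuffleMask and the ZeroLane sentinel are made up for this example. It shows the kind of mask involved, e.g. a PMOVZXDQ-style v4i32 -> v2i64 extension yields {0, zero, 1, zero}.

// Standalone illustration (not LLVM code): model a vector zero-extend as a
// shuffle mask in source-element units, using -1 as a "this lane is zero"
// sentinel, mirroring the idea behind the DecodeZeroExtendMask call above.
#include <cassert>
#include <cstdio>
#include <vector>

static const int ZeroLane = -1; // hypothetical stand-in for a zero sentinel

std::vector<int> zeroExtendShuffleMask(unsigned NumDstElts, unsigned SrcBits,
                                       unsigned DstBits) {
  assert(DstBits > SrcBits && DstBits % SrcBits == 0 &&
         "zero-extend must widen each element by a whole multiple");
  unsigned Scale = DstBits / SrcBits;
  std::vector<int> Mask;
  for (unsigned i = 0; i != NumDstElts; ++i) {
    Mask.push_back((int)i);      // low part of the wide lane: source element i
    for (unsigned j = 1; j != Scale; ++j)
      Mask.push_back(ZeroLane);  // remaining parts of the wide lane are zero
  }
  return Mask;
}

int main() {
  // PMOVZXDQ-style extension within a 128-bit register: two i32 elements
  // zero-extended to two i64 elements -> prints the mask "0 -1 1 -1".
  for (int M : zeroExtendShuffleMask(/*NumDstElts=*/2, /*SrcBits=*/32,
                                     /*DstBits=*/64))
    std::printf("%d ", M);
  std::printf("\n");
  return 0;
}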
@@ -9,9 +9,7 @@ define void @cmp_2_floats(<2 x float> %a, <2 x float> %b) {
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movaps %xmm0, %xmm2
; CHECK-NEXT: cmpordps %xmm0, %xmm0
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; CHECK-NEXT: psllq $32, %xmm0
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; CHECK-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: blendvps %xmm2, %xmm1
; CHECK-NEXT: movlps %xmm1, (%rax)
@@ -26,7 +24,7 @@ entry:
define void @cmp_2_doubles(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: cmp_2_doubles:
; CHECK: # BB#0: # %entry
; CHECK-NEXT: movapd %xmm0, %xmm2
; CHECK-NEXT: movapd %xmm0, %xmm2
; CHECK-NEXT: cmpordpd %xmm0, %xmm0
; CHECK-NEXT: blendvpd %xmm2, %xmm1
; CHECK-NEXT: movapd %xmm1, (%rax)
@@ -279,15 +279,15 @@ define <16 x i16> @broadcast_mem_v4i16_v16i16(<4 x i16>* %ptr) {
; X32-AVX2-LABEL: broadcast_mem_v4i16_v16i16:
; X32-AVX2: ## BB#0:
; X32-AVX2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X32-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X32-AVX2-NEXT: vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,4,5,6,7,6,7],zero,zero
; X32-AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; X32-AVX2-NEXT: retl
;
; X64-AVX2-LABEL: broadcast_mem_v4i16_v16i16:
; X64-AVX2: ## BB#0:
; X64-AVX2-NEXT: vpmovzxwd {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero
; X64-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; X64-AVX2-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; X64-AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,7,4,5,6,7,6,7],zero,zero
; X64-AVX2-NEXT: vpbroadcastq %xmm0, %ymm0
; X64-AVX2-NEXT: retq
;
@@ -923,9 +923,7 @@ define <2 x float> @sitofp_2i1_float(<2 x float> %a) {
; KNL: ## BB#0:
; KNL-NEXT: vxorps %xmm1, %xmm1, %xmm1
; KNL-NEXT: vcmpltps %xmm0, %xmm1, %xmm0
; KNL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; KNL-NEXT: vpsllq $32, %xmm0, %xmm0
; KNL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; KNL-NEXT: vinsertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
; KNL-NEXT: vcvtdq2ps %xmm0, %xmm0
; KNL-NEXT: retq
;
@@ -1337,9 +1337,7 @@ define <2 x float> @test_maxps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: movaps %xmm1, %xmm0
; STRICT-NEXT: cmpleps %xmm2, %xmm0
; STRICT-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; STRICT-NEXT: psllq $32, %xmm0
; STRICT-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; STRICT-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
; STRICT-NEXT: pslld $31, %xmm0
; STRICT-NEXT: blendvps %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0
@@ -1360,9 +1358,7 @@ define <2 x float> @test_minps_illegal_v2f32(<2 x float> %x, <2 x float> %y) {
; STRICT: # BB#0:
; STRICT-NEXT: movaps %xmm0, %xmm2
; STRICT-NEXT: cmpleps %xmm1, %xmm0
; STRICT-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; STRICT-NEXT: psllq $32, %xmm0
; STRICT-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,3,2,3]
; STRICT-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],zero,xmm0[1]
; STRICT-NEXT: pslld $31, %xmm0
; STRICT-NEXT: blendvps %xmm2, %xmm1
; STRICT-NEXT: movaps %xmm1, %xmm0