[X86][AVX] Fix lowering of X86ISD::VZEXT_MOVL for 128-bit -> 256-bit extension

The lowering patterns for X86ISD::VZEXT_MOVL for 128-bit to 256-bit vectors
were just copying the lower xmm instead of actually masking off the first
scalar using a blend.

Fix for PR25320.

Differential Revision: http://reviews.llvm.org/D14151

llvm-svn: 253561

commit 846b64e17a
parent f503d51e5b
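
For context: X86ISD::VZEXT_MOVL ("vzmovl") keeps element 0 of its operand and
zeroes every other element. The patterns removed below selected a plain
SUBREG_TO_REG xmm copy, which leaves elements 1 and up of the low 128-bit lane
holding whatever the source held rather than zero. A minimal IR sketch of the
widened 128-bit -> 256-bit form (hypothetical function, modelled on the tests
added by this commit):

; Only lane 0 of the result may be non-zero; lanes 1-7 must be zero.
define <8 x float> @vzmovl_sketch(<4 x float> %x) {
  ; Take element 0 of the 128-bit input ...
  %s = extractelement <4 x float> %x, i64 0
  ; ... and insert it into lane 0 of an otherwise all-zero 256-bit vector.
  %r = insertelement <8 x float> zeroinitializer, float %s, i64 0
  ret <8 x float> %r
}

A VEX-encoded 128-bit vmovaps does zero bits 255:128 of the ymm destination,
but it cannot zero lanes 1-3 of the copied xmm, so the fixed lowering blends
the source against a zeroed register instead; that is what the
vxorps/vblendps (and vxorpd/vblendpd) sequences in the new CHECK lines verify.
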
@@ -935,22 +935,6 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
                             IIC_SSE_MOVU_P_RR>, VEX, VEX_L;
 }
 
-let Predicates = [HasAVX] in {
-def : Pat<(v8i32 (X86vzmovl
-                  (insert_subvector undef, (v4i32 VR128:$src), (iPTR 0)))),
-          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
-def : Pat<(v4i64 (X86vzmovl
-                  (insert_subvector undef, (v2i64 VR128:$src), (iPTR 0)))),
-          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
-def : Pat<(v8f32 (X86vzmovl
-                  (insert_subvector undef, (v4f32 VR128:$src), (iPTR 0)))),
-          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
-def : Pat<(v4f64 (X86vzmovl
-                  (insert_subvector undef, (v2f64 VR128:$src), (iPTR 0)))),
-          (SUBREG_TO_REG (i32 0), (VMOVAPSrr VR128:$src), sub_xmm)>;
-}
-
-
 def : Pat<(int_x86_avx_storeu_ps_256 addr:$dst, VR256:$src),
           (VMOVUPSYmr addr:$dst, VR256:$src)>;
 def : Pat<(int_x86_avx_storeu_pd_256 addr:$dst, VR256:$src),
@@ -80,3 +80,70 @@ define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
 ; CHECK-NEXT: vzeroupper
 ; CHECK-NEXT: retq
 }
+
+; PR25320 Make sure that a widened (possibly legalized) vector correctly zero-extends upper elements.
+; FIXME - Ideally these should just call VMOVD/VMOVQ/VMOVSS/VMOVSD
+
+define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
+  %ld = load <2 x i32>, <2 x i32>* %in, align 8
+  %ext = extractelement <2 x i32> %ld, i64 0
+  %ins = insertelement <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %ext, i64 0
+  store <8 x i32> %ins, <8 x i32>* %out, align 32
+  ret void
+
+; CHECK-LABEL: legal_vzmovl_2i32_8i32
+; CHECK: vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
+; CHECK-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; CHECK-NEXT: vmovaps %ymm0, (%rsi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
+  %ld = load <2 x i64>, <2 x i64>* %in, align 8
+  %ext = extractelement <2 x i64> %ld, i64 0
+  %ins = insertelement <4 x i64> <i64 undef, i64 0, i64 0, i64 0>, i64 %ext, i64 0
+  store <4 x i64> %ins, <4 x i64>* %out, align 32
+  ret void
+
+; CHECK-LABEL: legal_vzmovl_2i64_4i64
+; CHECK: vmovupd (%rdi), %xmm0
+; CHECK-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; CHECK-NEXT: vmovapd %ymm0, (%rsi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
+  %ld = load <2 x float>, <2 x float>* %in, align 8
+  %ext = extractelement <2 x float> %ld, i64 0
+  %ins = insertelement <8 x float> <float undef, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, float %ext, i64 0
+  store <8 x float> %ins, <8 x float>* %out, align 32
+  ret void
+
+; CHECK-LABEL: legal_vzmovl_2f32_8f32
+; CHECK: vmovq {{.*#+}} xmm0 = mem[0],zero
+; CHECK-NEXT: vxorps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; CHECK-NEXT: vmovaps %ymm0, (%rsi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}
+
+define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
+  %ld = load <2 x double>, <2 x double>* %in, align 8
+  %ext = extractelement <2 x double> %ld, i64 0
+  %ins = insertelement <4 x double> <double undef, double 0.0, double 0.0, double 0.0>, double %ext, i64 0
+  store <4 x double> %ins, <4 x double>* %out, align 32
+  ret void
+
+; CHECK-LABEL: legal_vzmovl_2f64_4f64
+; CHECK: vmovupd (%rdi), %xmm0
+; CHECK-NEXT: vxorpd %ymm1, %ymm1, %ymm1
+; CHECK-NEXT: vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; CHECK-NEXT: vmovapd %ymm0, (%rsi)
+; CHECK-NEXT: vzeroupper
+; CHECK-NEXT: retq
+}