[X86] Use sse_load_f32/f64 and timm in patterns for memory form of vgetmantss/sd.

Previously we only matched scalar_to_vector and scalar load, but
we should be able to narrow a vector load or match vzload.

Also need to match TargetConstant instead of Constant. The register
patterns were previously updated, but not the memory patterns.

llvm-svn: 372458
This commit is contained in:
Craig Topper 2019-09-21 06:44:29 +00:00
parent 4fa12ac92c
commit 04682939eb
2 changed files with 5 additions and 8 deletions

View File

@@ -10280,12 +10280,11 @@ multiclass avx512_fp_scalar_imm<bits<8> opc, string OpcodeStr, SDNode OpNode,
(i32 timm:$src3))>,
Sched<[sched]>;
defm rmi : AVX512_maskable_scalar<opc, MRMSrcMem, _, (outs _.RC:$dst),
(ins _.RC:$src1, _.ScalarMemOp:$src2, i32u8imm:$src3),
(ins _.RC:$src1, _.IntScalarMemOp:$src2, i32u8imm:$src3),
OpcodeStr, "$src3, $src2, $src1", "$src1, $src2, $src3",
(OpNode (_.VT _.RC:$src1),
(_.VT (scalar_to_vector
(_.ScalarLdFrag addr:$src2))),
(i32 imm:$src3))>,
(_.VT _.ScalarIntMemCPat:$src2),
(i32 timm:$src3))>,
Sched<[sched.Folded, sched.ReadAfterFold]>;
}
}

View File

@@ -4801,15 +4801,13 @@ define <4 x float>@test_int_x86_avx512_mask_getmant_ss(<4 x float> %x0, <4 x flo
define <4 x float> @test_int_x86_avx512_mask_getmant_ss_load(<4 x float> %x0, <4 x float>* %x1p) {
; X64-LABEL: test_int_x86_avx512_mask_getmant_ss_load:
; X64: # %bb.0:
; X64-NEXT: vmovaps (%rdi), %xmm1
; X64-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm0
; X64-NEXT: vgetmantss $11, (%rdi), %xmm0, %xmm0
; X64-NEXT: retq
;
; X86-LABEL: test_int_x86_avx512_mask_getmant_ss_load:
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: vmovaps (%eax), %xmm1
; X86-NEXT: vgetmantss $11, %xmm1, %xmm0, %xmm0
; X86-NEXT: vgetmantss $11, (%eax), %xmm0, %xmm0
; X86-NEXT: retl
%x1 = load <4 x float>, <4 x float>* %x1p
%res = call <4 x float> @llvm.x86.avx512.mask.getmant.ss(<4 x float> %x0, <4 x float> %x1, i32 11, <4 x float> undef, i8 -1, i32 4)