From 0bcba19cdf6f2ba07d38959595a5b6c66ff33194 Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Wed, 1 Feb 2017 07:17:16 +0000
Subject: [PATCH] [X86] For AVX1/AVX2 isel, don't use FP move instructions for
 128-bit loads/stores of integer types.

For SSE we use the FP moves because of their smaller encoding, but that
advantage doesn't apply to AVX. So just do the natural thing rather than
having to explain why we don't.

We can't do this for 256-bit loads/stores: the 256-bit integer types are
legal, but AVX1 has no 256-bit integer loads and stores, so we still need
fallback patterns that use the FP instructions.

This doesn't affect any tests because execution domain fixing freely
converts the instructions anyway. Honestly, we could probably rely on it
for the SSE size optimization too.

llvm-svn: 293743
---
 llvm/lib/Target/X86/X86InstrSSE.td | 152 ++++++++++++++---------
 1 file changed, 74 insertions(+), 78 deletions(-)

diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index bc32eee15165..afc9a4f35ef7 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -954,24 +954,10 @@ let isCodeGenOnly = 1, ForceDisassemble = 1, hasSideEffects = 0,
                             IIC_SSE_MOVU_P_RR>;
 }
 
-// Use vmovaps/vmovups for AVX integer load/store.
 let Predicates = [HasAVX, NoVLX] in {
-  // 128-bit load/store
-  def : Pat<(alignedloadv2i64 addr:$src),
-            (VMOVAPSrm addr:$src)>;
-  def : Pat<(loadv2i64 addr:$src),
-            (VMOVUPSrm addr:$src)>;
-
-  def : Pat<(alignedstore (v2i64 VR128:$src), addr:$dst),
-            (VMOVAPSmr addr:$dst, VR128:$src)>;
-  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
-            (VMOVAPSmr addr:$dst, VR128:$src)>;
-  def : Pat<(store (v2i64 VR128:$src), addr:$dst),
-            (VMOVUPSmr addr:$dst, VR128:$src)>;
-  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
-            (VMOVUPSmr addr:$dst, VR128:$src)>;
-
-  // 256-bit load/store
+  // 256-bit load/store need to use floating point load/store in case we don't
+  // have AVX2. Execution domain fixing will convert to integer if AVX2 is
+  // available and changing the domain is beneficial.
   def : Pat<(alignedloadv4i64 addr:$src),
             (VMOVAPSYrm addr:$src)>;
   def : Pat<(loadv4i64 addr:$src),
@@ -980,10 +966,18 @@ let Predicates = [HasAVX, NoVLX] in {
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
   def : Pat<(alignedstore256 (v8i32 VR256:$src), addr:$dst),
             (VMOVAPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
+            (VMOVAPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
+            (VMOVAPSYmr addr:$dst, VR256:$src)>;
   def : Pat<(store (v4i64 VR256:$src), addr:$dst),
             (VMOVUPSYmr addr:$dst, VR256:$src)>;
   def : Pat<(store (v8i32 VR256:$src), addr:$dst),
             (VMOVUPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
+            (VMOVUPSYmr addr:$dst, VR256:$src)>;
+  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
+            (VMOVUPSYmr addr:$dst, VR256:$src)>;
 
   // Special patterns for storing subvector extracts of lower 128-bits
   // Its cheaper to just use VMOVAPS/VMOVUPS instead of VEXTRACTF128mr
@@ -993,18 +987,6 @@ let Predicates = [HasAVX, NoVLX] in {
   def : Pat<(alignedstore (v4f32 (extract_subvector
                            (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
             (VMOVAPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(alignedstore (v2i64 (extract_subvector
-                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVAPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(alignedstore (v4i32 (extract_subvector
-                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVAPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(alignedstore (v8i16 (extract_subvector
-                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVAPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(alignedstore (v16i8 (extract_subvector
-                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVAPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
 
   def : Pat<(store (v2f64 (extract_subvector
                            (v4f64 VR256:$src), (iPTR 0))), addr:$dst),
@@ -1012,40 +994,6 @@ let Predicates = [HasAVX, NoVLX] in {
   def : Pat<(store (v4f32 (extract_subvector
                            (v8f32 VR256:$src), (iPTR 0))), addr:$dst),
             (VMOVUPSmr addr:$dst, (v4f32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(store (v2i64 (extract_subvector
-                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVUPDmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(store (v4i32 (extract_subvector
-                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVUPSmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(store (v8i16 (extract_subvector
-                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVUPSmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-  def : Pat<(store (v16i8 (extract_subvector
-                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
-            (VMOVUPSmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
-}
-
-let Predicates = [HasAVX, NoVLX] in {
-  // 128-bit load/store
-  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
-            (VMOVAPSmr addr:$dst, VR128:$src)>;
-  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
-            (VMOVAPSmr addr:$dst, VR128:$src)>;
-  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
-            (VMOVUPSmr addr:$dst, VR128:$src)>;
-  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
-            (VMOVUPSmr addr:$dst, VR128:$src)>;
-
-  // 256-bit load/store
-  def : Pat<(alignedstore256 (v16i16 VR256:$src), addr:$dst),
-            (VMOVAPSYmr addr:$dst, VR256:$src)>;
-  def : Pat<(alignedstore256 (v32i8 VR256:$src), addr:$dst),
-            (VMOVAPSYmr addr:$dst, VR256:$src)>;
-  def : Pat<(store (v16i16 VR256:$src), addr:$dst),
-            (VMOVUPSYmr addr:$dst, VR256:$src)>;
-  def : Pat<(store (v32i8 VR256:$src), addr:$dst),
-            (VMOVUPSYmr addr:$dst, VR256:$src)>;
 }
 
 // Use movaps / movups for SSE integer load / store (one byte shorter).
@@ -3853,40 +3801,44 @@ def VMOVDQUYrr_REV : VSSI<0x7F, MRMDestReg, (outs VR256:$dst), (ins VR256:$src),
 
 let canFoldAsLoad = 1, mayLoad = 1, isReMaterializable = 1,
     hasSideEffects = 0, SchedRW = [WriteLoad] in {
+let Predicates = [HasAVX,NoVLX] in
 def VMOVDQArm  : VPDI<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
-                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
-                      VEX;
+                      "movdqa\t{$src, $dst|$dst, $src}",
+                      [(set VR128:$dst, (alignedloadv2i64 addr:$src))],
+                      IIC_SSE_MOVA_P_RM>, VEX;
 def VMOVDQAYrm : VPDI<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
                       "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_RM>,
                       VEX, VEX_L;
-let Predicates = [HasAVX] in {
-  def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
-                    "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
-                    XS, VEX;
-  def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
-                    "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
-                    XS, VEX, VEX_L;
-}
+let Predicates = [HasAVX,NoVLX] in
+def VMOVDQUrm  : I<0x6F, MRMSrcMem, (outs VR128:$dst), (ins i128mem:$src),
+                   "vmovdqu\t{$src, $dst|$dst, $src}",
+                   [(set VR128:$dst, (loadv2i64 addr:$src))],
+                   IIC_SSE_MOVU_P_RM>, XS, VEX;
+def VMOVDQUYrm : I<0x6F, MRMSrcMem, (outs VR256:$dst), (ins i256mem:$src),
+                   "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_RM>,
+                   XS, VEX, VEX_L;
 }
 
 let mayStore = 1, hasSideEffects = 0, SchedRW = [WriteStore] in {
+let Predicates = [HasAVX,NoVLX] in
 def VMOVDQAmr  : VPDI<0x7F, MRMDestMem, (outs),
                       (ins i128mem:$dst, VR128:$src),
-                      "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
-                      VEX;
+                      "movdqa\t{$src, $dst|$dst, $src}",
+                      [(alignedstore (v2i64 VR128:$src), addr:$dst)],
+                      IIC_SSE_MOVA_P_MR>, VEX;
 def VMOVDQAYmr : VPDI<0x7F, MRMDestMem, (outs),
                       (ins i256mem:$dst, VR256:$src),
                       "movdqa\t{$src, $dst|$dst, $src}", [], IIC_SSE_MOVA_P_MR>,
                       VEX, VEX_L;
-let Predicates = [HasAVX] in {
+let Predicates = [HasAVX,NoVLX] in
 def VMOVDQUmr  : I<0x7F, MRMDestMem, (outs), (ins i128mem:$dst, VR128:$src),
-                  "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
+                   "vmovdqu\t{$src, $dst|$dst, $src}",
+                   [(store (v2i64 VR128:$src), addr:$dst)], IIC_SSE_MOVU_P_MR>,
                    XS, VEX;
 def VMOVDQUYmr : I<0x7F, MRMDestMem, (outs), (ins i256mem:$dst, VR256:$src),
                    "vmovdqu\t{$src, $dst|$dst, $src}",[], IIC_SSE_MOVU_P_MR>,
                    XS, VEX, VEX_L;
 }
-}
 
 let SchedRW = [WriteMove] in {
 let hasSideEffects = 0 in {
@@ -3948,6 +3900,50 @@ def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
 def : InstAlias<"vmovdqu\t{$src, $dst|$dst, $src}",
                 (VMOVDQUYrr_REV VR256L:$dst, VR256H:$src), 0>;
 
+let Predicates = [HasAVX, NoVLX] in {
+  // Additional patterns for other integer sizes.
+  def : Pat<(alignedstore (v4i32 VR128:$src), addr:$dst),
+            (VMOVDQAmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignedstore (v8i16 VR128:$src), addr:$dst),
+            (VMOVDQAmr addr:$dst, VR128:$src)>;
+  def : Pat<(alignedstore (v16i8 VR128:$src), addr:$dst),
+            (VMOVDQAmr addr:$dst, VR128:$src)>;
+  def : Pat<(store (v4i32 VR128:$src), addr:$dst),
+            (VMOVDQUmr addr:$dst, VR128:$src)>;
+  def : Pat<(store (v8i16 VR128:$src), addr:$dst),
+            (VMOVDQUmr addr:$dst, VR128:$src)>;
+  def : Pat<(store (v16i8 VR128:$src), addr:$dst),
+            (VMOVDQUmr addr:$dst, VR128:$src)>;
+
+  // Special patterns for storing subvector extracts of lower 128-bits
+  // Its cheaper to just use VMOVDQA/VMOVDQU instead of VEXTRACTF128mr
+  def : Pat<(alignedstore (v2i64 (extract_subvector
+                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQAmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+  def : Pat<(alignedstore (v4i32 (extract_subvector
+                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQAmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+  def : Pat<(alignedstore (v8i16 (extract_subvector
+                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQAmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+  def : Pat<(alignedstore (v16i8 (extract_subvector
+                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQAmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+
+  def : Pat<(store (v2i64 (extract_subvector
+                           (v4i64 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQUmr addr:$dst, (v2i64 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+  def : Pat<(store (v4i32 (extract_subvector
+                           (v8i32 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQUmr addr:$dst, (v4i32 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+  def : Pat<(store (v8i16 (extract_subvector
+                           (v16i16 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQUmr addr:$dst, (v8i16 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+  def : Pat<(store (v16i8 (extract_subvector
+                           (v32i8 VR256:$src), (iPTR 0))), addr:$dst),
+            (VMOVDQUmr addr:$dst, (v16i8 (EXTRACT_SUBREG VR256:$src,sub_xmm)))>;
+}
+
 //===---------------------------------------------------------------------===//
 // SSE2 - Packed Integer Arithmetic Instructions
 //===---------------------------------------------------------------------===//
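To illustrate the user-visible effect of the 128-bit patterns above (a sketch, not part of the patch; the function name is made up), a plain unaligned 128-bit integer load/store written with intrinsics and compiled with -mavx should now be matched by the VMOVDQUrm/VMOVDQUmr patterns rather than the removed VMOVUPS patterns. Per the caveat in the commit message, the execution domain fixing pass can still flip the final mnemonic between the integer and FP forms, so the printed assembly is not guaranteed to change.

#include <immintrin.h>

/* Sketch: a 128-bit integer copy. Under AVX (without AVX-512VL) the load and
 * store below are selected via the VMOVDQUrm/VMOVDQUmr patterns added in this
 * patch instead of the removed VMOVUPSrm/VMOVUPSmr patterns; execution domain
 * fixing may still rewrite the final mnemonic. */
void copy128(const __m128i *src, __m128i *dst) {
  __m128i v = _mm_loadu_si128(src);  /* unaligned 128-bit integer load  */
  _mm_storeu_si128(dst, v);          /* unaligned 128-bit integer store */
}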