[RISCV] Select int_riscv_vsll with shift of 1 to vadd.vv.

An add might be faster than a shift. We can't do this earlier without
using a Freeze instruction.

This is the intrinsic version of D106689.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D118013
This commit is contained in:
Craig Topper 2022-01-24 07:54:59 -08:00
parent c1335166b2
commit cd2a9ff397
3 changed files with 88 additions and 0 deletions

View File

@@ -4543,6 +4543,30 @@ defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsrl", "PseudoVSRL", AllIntegerVectors,
defm : VPatBinaryV_VV_VX_VI<"int_riscv_vsra", "PseudoVSRA", AllIntegerVectors,
                            uimm5>;
// Select a vsll intrinsic whose shift amount is the immediate 1 as a
// vadd.vv of the source register with itself (x << 1 == x + x); the add
// may be faster than the shift on some implementations. This is the
// intrinsic counterpart of the ISD-level transform from D106689.
foreach vti = AllIntegerVectors in {
  // Emit shift by 1 as an add since it might be faster.
  // Unmasked form: int_riscv_vsll(x, 1, vl) -> PseudoVADD_VV x, x.
  def : Pat<(vti.Vector (int_riscv_vsll (vti.Vector vti.RegClass:$rs1),
                                        (XLenVT 1), VLOpFrag)),
            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
                                                              vti.RegClass:$rs1,
                                                              GPR:$vl,
                                                              vti.Log2SEW)>;
  // Masked form: the merge operand, the mask in V0, and the policy
  // operand of the original masked shift are all carried over unchanged.
  def : Pat<(vti.Vector (int_riscv_vsll_mask (vti.Vector vti.RegClass:$merge),
                                             (vti.Vector vti.RegClass:$rs1),
                                             (XLenVT 1),
                                             (vti.Mask V0),
                                             VLOpFrag,
                                             (XLenVT timm:$policy))),
            (!cast<Instruction>("PseudoVADD_VV_"#vti.LMul.MX#"_MASK")
                                                     vti.RegClass:$merge,
                                                     vti.RegClass:$rs1,
                                                     vti.RegClass:$rs1,
                                                     (vti.Mask V0),
                                                     GPR:$vl,
                                                     vti.Log2SEW,
                                                     (XLenVT timm:$policy))>;
}
//===----------------------------------------------------------------------===//
// 12.7. Vector Narrowing Integer Right Shift Instructions
//===----------------------------------------------------------------------===//

View File

@@ -2000,6 +2000,21 @@ entry:
  ret <vscale x 1 x i8> %a
}
; A shift-by-1 through the unmasked vsll intrinsic is selected as
; vadd.vv of the source register with itself (x << 1 == x + x).
define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
    <vscale x 1 x i8> %0,
    i32 1,
    i32 %1)
  ret <vscale x 1 x i8> %a
}
define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2017,6 +2032,23 @@ entry:
  ret <vscale x 1 x i8> %a
}
; The masked shift-by-1 is likewise selected as a masked vadd.vv; the
; shifted operand (%1, in v9) is used for both add inputs, and the mask
; (v0.t) is applied to the add.
define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 1,
    <vscale x 1 x i1> %2,
    i32 %3, i32 1)
  ret <vscale x 1 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry

View File

@@ -2000,6 +2000,21 @@ entry:
  ret <vscale x 1 x i8> %a
}
; RV64 variant (XLen operands are i64): a shift-by-1 through the unmasked
; vsll intrinsic is selected as vadd.vv of the source with itself.
define <vscale x 1 x i8> @intrinsic_vsll_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_1_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.nxv1i8(
    <vscale x 1 x i8> %0,
    i64 1,
    i64 %1)
  ret <vscale x 1 x i8> %a
}
define <vscale x 1 x i8> @intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
@@ -2017,6 +2032,23 @@ entry:
  ret <vscale x 1 x i8> %a
}
; RV64 variant of the masked shift-by-1 test: selected as a masked
; vadd.vv with the shifted operand (%1, in v9) as both add inputs.
define <vscale x 1 x i8> @intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_1_nxv1i8_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vadd.vv v8, v9, v9, v0.t
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 1,
    <vscale x 1 x i1> %2,
    i64 %3, i64 1)
  ret <vscale x 1 x i8> %a
}
define <vscale x 2 x i8> @intrinsic_vsll_vi_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vsll_vi_nxv2i8_nxv2i8_i8:
; CHECK: # %bb.0: # %entry