[RISCV] Add isel patterns to match sbset/sbclr/sbinv/sbext even if the shift amount isn't masked.

This uses the shiftop PatFrags to handle both the masked and the
unmasked shift-amount cases. The PatFrag also checks XLen as part
of the masked-amount check, so we don't need separate RV32 and RV64
patterns.

Differential Revision: https://reviews.llvm.org/D91016
Craig Topper 2020-11-09 09:45:22 -08:00
parent 2eccde4a2b
commit c0dd22e44a
3 changed files with 30 additions and 72 deletions
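
For context, the shiftop PatFrag referenced in the message is defined in RISCVInstrInfo.td. The sketch below is paraphrased, not copied from the tree, so the exact helper name (immbottomxlenset) and predicate body are approximate; it only illustrates why one pattern now covers masked and unmasked amounts on both RV32 and RV64.

// An immediate is a no-op shift-amount mask if its low 5 (RV32) or
// 6 (RV64) bits are all ones, i.e. ANDing the shift amount with it
// cannot change the bits the shift instruction reads anyway.
def immbottomxlenset : ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return countTrailingOnes<uint64_t>(Imm) >= 6;
  return countTrailingOnes<uint64_t>(Imm) >= 5;
}]>;

// Matches either a bare shift or a shift whose amount is masked with
// such an immediate, so a single pattern handles both forms.
class shiftop<SDPatternOperator operator>
    : PatFrags<(ops node:$val, node:$count),
               [(operator node:$val, node:$count),
                (operator node:$val, (and node:$count, immbottomxlenset))]>;

With shiftop<shl>, the SBSET/SBCLR/SBINV patterns in the diff match (shl 1, GPR:$rs2) whether or not the shift amount was explicitly ANDed with 31 or 63, which is why the separate IsRV32/IsRV64 patterns can be dropped.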


@@ -675,37 +675,17 @@ def : Pat<(rotl GPR:$rs1, GPR:$rs2), (ROL GPR:$rs1, GPR:$rs2)>;
 def : Pat<(rotr GPR:$rs1, GPR:$rs2), (ROR GPR:$rs1, GPR:$rs2)>;
 } // Predicates = [HasStdExtZbbOrZbp]
-let Predicates = [HasStdExtZbs, IsRV32] in
-def : Pat<(and (not (shl 1, (and GPR:$rs2, 31))), GPR:$rs1),
+let Predicates = [HasStdExtZbs] in {
+def : Pat<(and (not (shiftop<shl> 1, GPR:$rs2)), GPR:$rs1),
           (SBCLR GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV64] in
-def : Pat<(and (not (shl 1, (and GPR:$rs2, 63))), GPR:$rs1),
-          (SBCLR GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs] in
 def : Pat<(and (rotl -2, GPR:$rs2), GPR:$rs1), (SBCLR GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV32] in
-def : Pat<(or (shl 1, (and GPR:$rs2, 31)), GPR:$rs1),
+def : Pat<(or (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
           (SBSET GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV64] in
-def : Pat<(or (shl 1, (and GPR:$rs2, 63)), GPR:$rs1),
-          (SBSET GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV32] in
-def : Pat<(xor (shl 1, (and GPR:$rs2, 31)), GPR:$rs1),
+def : Pat<(xor (shiftop<shl> 1, GPR:$rs2), GPR:$rs1),
           (SBINV GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV64] in
-def : Pat<(xor (shl 1, (and GPR:$rs2, 63)), GPR:$rs1),
-          (SBINV GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV32] in
-def : Pat<(and (srl GPR:$rs1, (and GPR:$rs2, 31)), 1),
-          (SBEXT GPR:$rs1, GPR:$rs2)>;
-let Predicates = [HasStdExtZbs, IsRV64] in
-def : Pat<(and (srl GPR:$rs1, (and GPR:$rs2, 63)), 1),
+def : Pat<(and (shiftop<srl> GPR:$rs1, GPR:$rs2), 1),
           (SBEXT GPR:$rs1, GPR:$rs2)>;
+}
 let Predicates = [HasStdExtZbb] in {
 def : Pat<(SLOIPat GPR:$rs1, uimmlog2xlen:$shamt),


@@ -47,10 +47,7 @@ define i32 @sbclr_i32_no_mask(i32 %a, i32 %b) nounwind {
 ;
 ; RV32IBS-LABEL: sbclr_i32_no_mask:
 ; RV32IBS: # %bb.0:
-; RV32IBS-NEXT: addi a2, zero, 1
-; RV32IBS-NEXT: sll a1, a2, a1
-; RV32IBS-NEXT: not a1, a1
-; RV32IBS-NEXT: and a0, a1, a0
+; RV32IBS-NEXT: sbclr a0, a0, a1
 ; RV32IBS-NEXT: ret
   %shl = shl nuw i32 1, %b
   %neg = xor i32 %shl, -1
@@ -156,16 +153,12 @@ define i32 @sbset_i32_no_mask(i32 %a, i32 %b) nounwind {
 ;
 ; RV32IB-LABEL: sbset_i32_no_mask:
 ; RV32IB: # %bb.0:
-; RV32IB-NEXT: addi a2, zero, 1
-; RV32IB-NEXT: sll a1, a2, a1
-; RV32IB-NEXT: or a0, a1, a0
+; RV32IB-NEXT: sbset a0, a0, a1
 ; RV32IB-NEXT: ret
 ;
 ; RV32IBS-LABEL: sbset_i32_no_mask:
 ; RV32IBS: # %bb.0:
-; RV32IBS-NEXT: addi a2, zero, 1
-; RV32IBS-NEXT: sll a1, a2, a1
-; RV32IBS-NEXT: or a0, a1, a0
+; RV32IBS-NEXT: sbset a0, a0, a1
 ; RV32IBS-NEXT: ret
   %shl = shl nuw i32 1, %b
   %or = or i32 %shl, %a
@@ -190,18 +183,18 @@ define i64 @sbset_i64(i64 %a, i64 %b) nounwind {
 ; RV32IB-LABEL: sbset_i64:
 ; RV32IB: # %bb.0:
 ; RV32IB-NEXT: addi a3, zero, 1
-; RV32IB-NEXT: sll a2, a3, a2
-; RV32IB-NEXT: srai a3, a2, 31
-; RV32IB-NEXT: or a0, a2, a0
+; RV32IB-NEXT: sll a3, a3, a2
+; RV32IB-NEXT: srai a3, a3, 31
+; RV32IB-NEXT: sbset a0, a0, a2
 ; RV32IB-NEXT: or a1, a3, a1
 ; RV32IB-NEXT: ret
 ;
 ; RV32IBS-LABEL: sbset_i64:
 ; RV32IBS: # %bb.0:
 ; RV32IBS-NEXT: addi a3, zero, 1
-; RV32IBS-NEXT: sll a2, a3, a2
-; RV32IBS-NEXT: srai a3, a2, 31
-; RV32IBS-NEXT: or a0, a2, a0
+; RV32IBS-NEXT: sll a3, a3, a2
+; RV32IBS-NEXT: srai a3, a3, 31
+; RV32IBS-NEXT: sbset a0, a0, a2
 ; RV32IBS-NEXT: or a1, a3, a1
 ; RV32IBS-NEXT: ret
   %1 = trunc i64 %b to i32
@@ -253,18 +246,18 @@ define i64 @sbinv_i64(i64 %a, i64 %b) nounwind {
 ; RV32IB-LABEL: sbinv_i64:
 ; RV32IB: # %bb.0:
 ; RV32IB-NEXT: addi a3, zero, 1
-; RV32IB-NEXT: sll a2, a3, a2
-; RV32IB-NEXT: srai a3, a2, 31
-; RV32IB-NEXT: xor a0, a2, a0
+; RV32IB-NEXT: sll a3, a3, a2
+; RV32IB-NEXT: srai a3, a3, 31
+; RV32IB-NEXT: sbinv a0, a0, a2
 ; RV32IB-NEXT: xor a1, a3, a1
 ; RV32IB-NEXT: ret
 ;
 ; RV32IBS-LABEL: sbinv_i64:
 ; RV32IBS: # %bb.0:
 ; RV32IBS-NEXT: addi a3, zero, 1
-; RV32IBS-NEXT: sll a2, a3, a2
-; RV32IBS-NEXT: srai a3, a2, 31
-; RV32IBS-NEXT: xor a0, a2, a0
+; RV32IBS-NEXT: sll a3, a3, a2
+; RV32IBS-NEXT: srai a3, a3, 31
+; RV32IBS-NEXT: sbinv a0, a0, a2
 ; RV32IBS-NEXT: xor a1, a3, a1
 ; RV32IBS-NEXT: ret
   %1 = trunc i64 %b to i32
@@ -306,14 +299,12 @@ define i32 @sbext_i32_no_mask(i32 %a, i32 %b) nounwind {
 ;
 ; RV32IB-LABEL: sbext_i32_no_mask:
 ; RV32IB: # %bb.0:
-; RV32IB-NEXT: srl a0, a0, a1
-; RV32IB-NEXT: andi a0, a0, 1
+; RV32IB-NEXT: sbext a0, a0, a1
 ; RV32IB-NEXT: ret
 ;
 ; RV32IBS-LABEL: sbext_i32_no_mask:
 ; RV32IBS: # %bb.0:
-; RV32IBS-NEXT: srl a0, a0, a1
-; RV32IBS-NEXT: andi a0, a0, 1
+; RV32IBS-NEXT: sbext a0, a0, a1
 ; RV32IBS-NEXT: ret
   %shr = lshr i32 %a, %b
   %and1 = and i32 %shr, 1


@@ -96,10 +96,7 @@ define i64 @sbclr_i64_no_mask(i64 %a, i64 %b) nounwind {
 ;
 ; RV64IBS-LABEL: sbclr_i64_no_mask:
 ; RV64IBS: # %bb.0:
-; RV64IBS-NEXT: addi a2, zero, 1
-; RV64IBS-NEXT: sll a1, a2, a1
-; RV64IBS-NEXT: not a1, a1
-; RV64IBS-NEXT: and a0, a1, a0
+; RV64IBS-NEXT: sbclr a0, a0, a1
 ; RV64IBS-NEXT: ret
   %shl = shl i64 1, %b
   %neg = xor i64 %shl, -1
@@ -185,16 +182,12 @@ define i64 @sbset_i64_no_mask(i64 %a, i64 %b) nounwind {
 ;
 ; RV64IB-LABEL: sbset_i64_no_mask:
 ; RV64IB: # %bb.0:
-; RV64IB-NEXT: addi a2, zero, 1
-; RV64IB-NEXT: sll a1, a2, a1
-; RV64IB-NEXT: or a0, a1, a0
+; RV64IB-NEXT: sbset a0, a0, a1
 ; RV64IB-NEXT: ret
 ;
 ; RV64IBS-LABEL: sbset_i64_no_mask:
 ; RV64IBS: # %bb.0:
-; RV64IBS-NEXT: addi a2, zero, 1
-; RV64IBS-NEXT: sll a1, a2, a1
-; RV64IBS-NEXT: or a0, a1, a0
+; RV64IBS-NEXT: sbset a0, a0, a1
 ; RV64IBS-NEXT: ret
   %shl = shl i64 1, %b
   %or = or i64 %shl, %a
@@ -279,16 +272,12 @@ define i64 @sbinv_i64_no_mask(i64 %a, i64 %b) nounwind {
 ;
 ; RV64IB-LABEL: sbinv_i64_no_mask:
 ; RV64IB: # %bb.0:
-; RV64IB-NEXT: addi a2, zero, 1
-; RV64IB-NEXT: sll a1, a2, a1
-; RV64IB-NEXT: xor a0, a1, a0
+; RV64IB-NEXT: sbinv a0, a0, a1
 ; RV64IB-NEXT: ret
 ;
 ; RV64IBS-LABEL: sbinv_i64_no_mask:
 ; RV64IBS: # %bb.0:
-; RV64IBS-NEXT: addi a2, zero, 1
-; RV64IBS-NEXT: sll a1, a2, a1
-; RV64IBS-NEXT: xor a0, a1, a0
+; RV64IBS-NEXT: sbinv a0, a0, a1
 ; RV64IBS-NEXT: ret
   %shl = shl nuw i64 1, %b
   %xor = xor i64 %shl, %a
@@ -369,14 +358,12 @@ define i64 @sbext_i64_no_mask(i64 %a, i64 %b) nounwind {
 ;
 ; RV64IB-LABEL: sbext_i64_no_mask:
 ; RV64IB: # %bb.0:
-; RV64IB-NEXT: srl a0, a0, a1
-; RV64IB-NEXT: andi a0, a0, 1
+; RV64IB-NEXT: sbext a0, a0, a1
 ; RV64IB-NEXT: ret
 ;
 ; RV64IBS-LABEL: sbext_i64_no_mask:
 ; RV64IBS: # %bb.0:
-; RV64IBS-NEXT: srl a0, a0, a1
-; RV64IBS-NEXT: andi a0, a0, 1
+; RV64IBS-NEXT: sbext a0, a0, a1
 ; RV64IBS-NEXT: ret
   %shr = lshr i64 %a, %b
   %and1 = and i64 %shr, 1