[RISCV] Add tests for existing (rotr (bswap X), (i32 16))->grevi pattern for RV32. Extend same pattern to rotl and GREVIW.

Not sure why bswap was treated specially. This also applies to bitreverse
or generic grevi. We can improve this in future patches.
For now I just wanted to get the consistency and the test coverage
as I plan to make some other changes around bswap.
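
As background, here is a minimal standalone C sketch of why these patterns hold (not part of the patch; the helpers bswap32, rotr16, and grevi8 are hypothetical names used only for illustration): byte-swapping an i32 and then rotating it by 16 bits, in either direction, leaves the bytes swapped within each 16-bit half, which is exactly what grevi/greviw with shamt 8 (rev8.h) performs.

#include <assert.h>
#include <stdint.h>

/* Illustrative helpers only; the names are hypothetical, not part of the patch. */
static uint32_t bswap32(uint32_t x) {  /* [b3 b2 b1 b0] -> [b0 b1 b2 b3] */
  return (x << 24) | ((x & 0xff00u) << 8) | ((x >> 8) & 0xff00u) | (x >> 24);
}
static uint32_t rotr16(uint32_t x) {   /* rotate by 16; rotl by 16 is identical */
  return (x >> 16) | (x << 16);
}
static uint32_t grevi8(uint32_t x) {   /* grevi shamt=8 (rev8.h): swap bytes in each halfword */
  return ((x & 0x00ff00ffu) << 8) | ((x >> 8) & 0x00ff00ffu);
}

int main(void) {
  uint32_t x = 0x12345678u;
  /* bswap32 gives 0x78563412; rotating that by 16 gives 0x34127856,
     i.e. the bytes of x swapped within each halfword. */
  assert(rotr16(bswap32(x)) == grevi8(x));
  return 0;
}

On a 32-bit value a rotate by 16 is the same in either direction, which is why both the rotr and rotl forms map to the same GREVI. On RV64 the i32 bswap has already been selected to greviw with shamt 24, so the equivalent patterns below match riscv_rorw/riscv_rolw of riscv_greviw 24.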
Craig Topper 2020-11-27 15:17:34 -08:00
parent 88974e829e
commit 6ee22ca6ce
3 changed files with 184 additions and 1 deletion


@@ -730,7 +730,8 @@ def : Pat<(riscv_gorci GPR:$rs1, timm:$shamt), (GORCI GPR:$rs1, timm:$shamt)>;
 } // Predicates = [HasStdExtZbp]
 
 let Predicates = [HasStdExtZbp, IsRV32] in {
-def : Pat<(rotr (bswap GPR:$rs1), (i32 16)), (GREVI GPR:$rs1, (i32 8))>;
+def : Pat<(rotr (bswap GPR:$rs1), (i32 16)), (GREVI GPR:$rs1, 8)>;
+def : Pat<(rotl (bswap GPR:$rs1), (i32 16)), (GREVI GPR:$rs1, 8)>;
 def : Pat<(bswap GPR:$rs1), (GREVI GPR:$rs1, (i32 24))>;
 def : Pat<(bitreverse GPR:$rs1), (GREVI GPR:$rs1, (i32 31))>;
 } // Predicates = [HasStdExtZbp, IsRV32]
@@ -921,6 +922,8 @@ def : Pat<(SROIWPat GPR:$rs1, uimmlog2xlen:$shamt),
 } // Predicates = [HasStdExtZbb, IsRV64]
 
 let Predicates = [HasStdExtZbp, IsRV64] in {
+def : Pat<(riscv_rorw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>;
+def : Pat<(riscv_rolw (riscv_greviw GPR:$rs1, 24), (i64 16)), (GREVIW GPR:$rs1, 8)>;
 def : Pat<(riscv_greviw GPR:$rs1, timm:$shamt), (GREVIW GPR:$rs1, timm:$shamt)>;
 def : Pat<(riscv_gorciw GPR:$rs1, timm:$shamt), (GORCIW GPR:$rs1, timm:$shamt)>;
 } // Predicates = [HasStdExtZbp, IsRV64]


@@ -1781,6 +1781,74 @@ define i64 @bitreverse_i64(i64 %a) nounwind {
ret i64 %1
}
define i32 @bswap_rotr_i32(i32 %a) {
; RV32I-LABEL: bswap_rotr_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a1, a0, 8
; RV32I-NEXT: lui a2, 16
; RV32I-NEXT: addi a2, a2, -256
; RV32I-NEXT: and a1, a1, a2
; RV32I-NEXT: srli a2, a0, 24
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: slli a2, a0, 8
; RV32I-NEXT: lui a3, 4080
; RV32I-NEXT: and a2, a2, a3
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: slli a1, a0, 16
; RV32I-NEXT: srli a0, a0, 16
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IB-LABEL: bswap_rotr_i32:
; RV32IB: # %bb.0:
; RV32IB-NEXT: rev8.h a0, a0
; RV32IB-NEXT: ret
;
; RV32IBP-LABEL: bswap_rotr_i32:
; RV32IBP: # %bb.0:
; RV32IBP-NEXT: rev8.h a0, a0
; RV32IBP-NEXT: ret
%1 = call i32 @llvm.bswap.i32(i32 %a)
%2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16)
ret i32 %2
}
define i32 @bswap_rotl_i32(i32 %a) {
; RV32I-LABEL: bswap_rotl_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a1, a0, 8
; RV32I-NEXT: lui a2, 16
; RV32I-NEXT: addi a2, a2, -256
; RV32I-NEXT: and a1, a1, a2
; RV32I-NEXT: srli a2, a0, 24
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: slli a2, a0, 8
; RV32I-NEXT: lui a3, 4080
; RV32I-NEXT: and a2, a2, a3
; RV32I-NEXT: slli a0, a0, 24
; RV32I-NEXT: or a0, a0, a2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 16
; RV32I-NEXT: slli a0, a0, 16
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV32IB-LABEL: bswap_rotl_i32:
; RV32IB: # %bb.0:
; RV32IB-NEXT: rev8.h a0, a0
; RV32IB-NEXT: ret
;
; RV32IBP-LABEL: bswap_rotl_i32:
; RV32IBP: # %bb.0:
; RV32IBP-NEXT: rev8.h a0, a0
; RV32IBP-NEXT: ret
%1 = call i32 @llvm.bswap.i32(i32 %a)
%2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16)
ret i32 %2
}
define i32 @shfl1_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: shfl1_i32:
; RV32I: # %bb.0:


@@ -2265,6 +2265,118 @@ define i64 @bitreverse_i64(i64 %a) nounwind {
ret i64 %1
}
define i32 @bswap_rotr_i32(i32 %a) {
; RV64I-LABEL: bswap_rotr_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a0, 24
; RV64I-NEXT: lui a2, 4080
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: srli a2, a0, 8
; RV64I-NEXT: addi a3, zero, 255
; RV64I-NEXT: slli a4, a3, 24
; RV64I-NEXT: and a2, a2, a4
; RV64I-NEXT: or a1, a2, a1
; RV64I-NEXT: srli a2, a0, 40
; RV64I-NEXT: lui a4, 16
; RV64I-NEXT: addiw a4, a4, -256
; RV64I-NEXT: and a2, a2, a4
; RV64I-NEXT: srli a4, a0, 56
; RV64I-NEXT: or a2, a2, a4
; RV64I-NEXT: or a1, a1, a2
; RV64I-NEXT: slli a2, a0, 8
; RV64I-NEXT: slli a4, a3, 32
; RV64I-NEXT: and a2, a2, a4
; RV64I-NEXT: slli a4, a0, 24
; RV64I-NEXT: slli a5, a3, 40
; RV64I-NEXT: and a4, a4, a5
; RV64I-NEXT: or a2, a4, a2
; RV64I-NEXT: slli a4, a0, 40
; RV64I-NEXT: slli a3, a3, 48
; RV64I-NEXT: and a3, a4, a3
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: or a0, a0, a3
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 16
; RV64I-NEXT: addi a2, zero, 1
; RV64I-NEXT: slli a2, a2, 32
; RV64I-NEXT: addi a2, a2, -1
; RV64I-NEXT: slli a2, a2, 16
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: srli a0, a0, 48
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64IB-LABEL: bswap_rotr_i32:
; RV64IB: # %bb.0:
; RV64IB-NEXT: greviw a0, a0, 8
; RV64IB-NEXT: ret
;
; RV64IBP-LABEL: bswap_rotr_i32:
; RV64IBP: # %bb.0:
; RV64IBP-NEXT: greviw a0, a0, 8
; RV64IBP-NEXT: ret
%1 = call i32 @llvm.bswap.i32(i32 %a)
%2 = call i32 @llvm.fshr.i32(i32 %1, i32 %1, i32 16)
ret i32 %2
}
define i32 @bswap_rotl_i32(i32 %a) {
; RV64I-LABEL: bswap_rotl_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a0, 24
; RV64I-NEXT: lui a2, 4080
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: srli a2, a0, 8
; RV64I-NEXT: addi a3, zero, 255
; RV64I-NEXT: slli a4, a3, 24
; RV64I-NEXT: and a2, a2, a4
; RV64I-NEXT: or a1, a2, a1
; RV64I-NEXT: srli a2, a0, 40
; RV64I-NEXT: lui a4, 16
; RV64I-NEXT: addiw a4, a4, -256
; RV64I-NEXT: and a2, a2, a4
; RV64I-NEXT: srli a4, a0, 56
; RV64I-NEXT: or a2, a2, a4
; RV64I-NEXT: or a1, a1, a2
; RV64I-NEXT: slli a2, a0, 8
; RV64I-NEXT: slli a4, a3, 32
; RV64I-NEXT: and a2, a2, a4
; RV64I-NEXT: slli a4, a0, 24
; RV64I-NEXT: slli a5, a3, 40
; RV64I-NEXT: and a4, a4, a5
; RV64I-NEXT: or a2, a4, a2
; RV64I-NEXT: slli a4, a0, 40
; RV64I-NEXT: slli a3, a3, 48
; RV64I-NEXT: and a3, a4, a3
; RV64I-NEXT: slli a0, a0, 56
; RV64I-NEXT: or a0, a0, a3
; RV64I-NEXT: or a0, a0, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 16
; RV64I-NEXT: addi a2, zero, 1
; RV64I-NEXT: slli a2, a2, 32
; RV64I-NEXT: addi a2, a2, -1
; RV64I-NEXT: slli a2, a2, 16
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: srli a0, a0, 48
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
;
; RV64IB-LABEL: bswap_rotl_i32:
; RV64IB: # %bb.0:
; RV64IB-NEXT: greviw a0, a0, 8
; RV64IB-NEXT: ret
;
; RV64IBP-LABEL: bswap_rotl_i32:
; RV64IBP: # %bb.0:
; RV64IBP-NEXT: greviw a0, a0, 8
; RV64IBP-NEXT: ret
%1 = call i32 @llvm.bswap.i32(i32 %a)
%2 = call i32 @llvm.fshl.i32(i32 %1, i32 %1, i32 16)
ret i32 %2
}
; There's no [un]shfliw instruction as slliu.w occupies the encoding slot that
; would be occupied by shfliw.