[RISCV] Refine known bits for READ_VLENB

This implements known bits for READ_VLENB using any information known about the minimum and maximum VLEN. There's an additional assumption that VLEN is a power of two.

The motivation here is mostly to remove the last use of getMinVLen, but while I was here I also fixed the bug for VLEN < 128 and handled the maximum from the command line (-riscv-v-vector-bits-max) generically as well.
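For intuition, here is a minimal standalone sketch (plain C++, not the patch's code) of the same reasoning: given that VLEN is a power of two with a known lower and upper bound, it derives the known-zero/known-one masks for VLENB = VLEN / 8. The concrete bounds used below (Zvl128b for the minimum, -riscv-v-vector-bits-max=512 for the maximum) are illustrative assumptions, not something this change hardcodes.

// Standalone sketch, not the LLVM implementation: derive known bits of VLENB
// from hypothetical VLEN bounds, assuming VLEN is a power of two.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Floor of log2 for a nonzero value.
static unsigned FloorLog2(uint64_t X) {
  unsigned L = 0;
  while (X >>= 1)
    ++L;
  return L;
}

int main() {
  // Hypothetical bounds, e.g. VLEN >= 128 from Zvl128b and
  // VLEN <= 512 from -riscv-v-vector-bits-max=512.
  const uint64_t MinVLenB = 128 / 8;
  const uint64_t MaxVLenB = 512 / 8;
  assert(MinVLenB > 0 && "vector extension must be enabled");

  uint64_t KnownZero = 0, KnownOne = 0;
  // VLENB is a power of two >= MinVLenB, so its low log2(MinVLenB) bits are 0.
  KnownZero |= (uint64_t(1) << FloorLog2(MinVLenB)) - 1;
  // VLENB <= MaxVLenB, so every bit above bit log2(MaxVLenB) is 0.
  KnownZero |= ~((uint64_t(2) << FloorLog2(MaxVLenB)) - 1);
  // If the bounds coincide, VLENB is fully known: exactly one bit is set.
  if (MinVLenB == MaxVLenB)
    KnownOne |= uint64_t(1) << FloorLog2(MinVLenB);

  std::printf("KnownZero=0x%016llx KnownOne=0x%016llx\n",
              (unsigned long long)KnownZero, (unsigned long long)KnownOne);
  return 0;
}

When the two bounds coincide, the single set bit of VLENB becomes a known one, which is what lets the VLEN256EXACT test configuration below fold vscale expressions to constants.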

Differential Revision: https://reviews.llvm.org/D128758
Philip Reames 2022-06-28 15:36:34 -07:00 committed by Philip Reames
parent 44a114fec7
commit 860c62f53c
2 changed files with 70 additions and 15 deletions


@@ -9388,14 +9388,15 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
     break;
   }
   case RISCVISD::READ_VLENB: {
-    // If we know the minimum VLen from Zvl extensions, we can use that to
-    // determine the trailing zeros of VLENB.
-    // FIXME: Limit to 128 bit vectors until we have more testing.
-    unsigned MinVLenB = std::min(128U, Subtarget.getMinVLen()) / 8;
-    if (MinVLenB > 0)
-      Known.Zero.setLowBits(Log2_32(MinVLenB));
-    // We assume VLENB is no more than 65536 / 8 bytes.
-    Known.Zero.setBitsFrom(14);
+    // We can use the minimum and maximum VLEN values to bound VLENB. We
+    // know VLEN must be a power of two.
+    const unsigned MinVLenB = Subtarget.getRealMinVLen() / 8;
+    const unsigned MaxVLenB = Subtarget.getRealMaxVLen() / 8;
+    assert(MinVLenB > 0 && "READ_VLENB without vector extension enabled?");
+    Known.Zero.setLowBits(Log2_32(MinVLenB));
+    Known.Zero.setBitsFrom(Log2_32(MaxVLenB)+1);
+    if (MaxVLenB == MinVLenB)
+      Known.One.setBit(Log2_32(MinVLenB));
     break;
   }
   case ISD::INTRINSIC_W_CHAIN:


@@ -1,8 +1,14 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple riscv64 -mattr=+m,+v < %s \
-; RUN: | FileCheck %s -check-prefix=RV64
+; RUN: | FileCheck %s -check-prefixes=RV64,RV64-VLENUNK
 ; RUN: llc -mtriple riscv32 -mattr=+m,+v < %s \
 ; RUN: | FileCheck %s -check-prefix=RV32
+; RUN: llc -mtriple riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=256 < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64,RV64-VLEN256MIN
+; RUN: llc -mtriple riscv64 -mattr=+m,+v -riscv-v-vector-bits-max=256 < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64,RV64-VLEN256MAX
+; RUN: llc -mtriple riscv64 -mattr=+m,+v -riscv-v-vector-bits-min=256 -riscv-v-vector-bits-max=256 < %s \
+; RUN: | FileCheck %s -check-prefixes=RV64-VLEN256EXACT
 define i64 @vscale_zero() nounwind {
@@ -16,6 +22,11 @@ define i64 @vscale_zero() nounwind {
 ; RV32-NEXT: li a0, 0
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_zero:
+; RV64-VLEN256EXACT: # %bb.0: # %entry
+; RV64-VLEN256EXACT-NEXT: li a0, 0
+; RV64-VLEN256EXACT-NEXT: ret
 entry:
 %0 = call i64 @llvm.vscale.i64()
 %1 = mul i64 %0, 0
@@ -35,6 +46,11 @@ define i64 @vscale_one() nounwind {
 ; RV32-NEXT: srli a0, a0, 3
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_one:
+; RV64-VLEN256EXACT: # %bb.0: # %entry
+; RV64-VLEN256EXACT-NEXT: li a0, 4
+; RV64-VLEN256EXACT-NEXT: ret
 entry:
 %0 = call i64 @llvm.vscale.i64()
 %1 = mul i64 %0, 1
@@ -54,6 +70,11 @@ define i64 @vscale_uimmpow2xlen() nounwind {
 ; RV32-NEXT: slli a0, a0, 3
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_uimmpow2xlen:
+; RV64-VLEN256EXACT: # %bb.0: # %entry
+; RV64-VLEN256EXACT-NEXT: li a0, 256
+; RV64-VLEN256EXACT-NEXT: ret
 entry:
 %0 = call i64 @llvm.vscale.i64()
 %1 = mul i64 %0, 64
@@ -75,6 +96,11 @@ define i64 @vscale_non_pow2() nounwind {
 ; RV32-NEXT: add a0, a1, a0
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_non_pow2:
+; RV64-VLEN256EXACT: # %bb.0: # %entry
+; RV64-VLEN256EXACT-NEXT: li a0, 96
+; RV64-VLEN256EXACT-NEXT: ret
 entry:
 %0 = call i64 @llvm.vscale.i64()
 %1 = mul i64 %0, 24
@@ -99,6 +125,11 @@ define i64 @vscale_select(i32 %x, i32 %y) {
 ; RV32-NEXT: srli a0, a0, 3
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_select:
+; RV64-VLEN256EXACT: # %bb.0:
+; RV64-VLEN256EXACT-NEXT: li a0, 4
+; RV64-VLEN256EXACT-NEXT: ret
 %a = call i64 @llvm.vscale.i64()
 %b = and i64 %a, 4294967295
 %c = icmp eq i32 %x, %y
@@ -119,6 +150,11 @@ define i64 @vscale_high_bits_zero() nounwind {
 ; RV32-NEXT: srli a0, a0, 3
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_high_bits_zero:
+; RV64-VLEN256EXACT: # %bb.0: # %entry
+; RV64-VLEN256EXACT-NEXT: li a0, 4
+; RV64-VLEN256EXACT-NEXT: ret
 entry:
 %0 = call i64 @llvm.vscale.i64()
 %1 = and i64 %0, 2047
@@ -126,12 +162,12 @@ entry:
 }
 define i64 @vscale_masked() nounwind {
-; RV64-LABEL: vscale_masked:
-; RV64: # %bb.0: # %entry
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: srli a0, a0, 3
-; RV64-NEXT: andi a0, a0, 510
-; RV64-NEXT: ret
+; RV64-VLENUNK-LABEL: vscale_masked:
+; RV64-VLENUNK: # %bb.0: # %entry
+; RV64-VLENUNK-NEXT: csrr a0, vlenb
+; RV64-VLENUNK-NEXT: srli a0, a0, 3
+; RV64-VLENUNK-NEXT: andi a0, a0, 510
+; RV64-VLENUNK-NEXT: ret
 ;
 ; RV32-LABEL: vscale_masked:
 ; RV32: # %bb.0: # %entry
@@ -140,6 +176,24 @@ define i64 @vscale_masked() nounwind {
 ; RV32-NEXT: andi a0, a0, 510
 ; RV32-NEXT: li a1, 0
 ; RV32-NEXT: ret
+;
+; RV64-VLEN256MIN-LABEL: vscale_masked:
+; RV64-VLEN256MIN: # %bb.0: # %entry
+; RV64-VLEN256MIN-NEXT: csrr a0, vlenb
+; RV64-VLEN256MIN-NEXT: srli a0, a0, 3
+; RV64-VLEN256MIN-NEXT: andi a0, a0, 508
+; RV64-VLEN256MIN-NEXT: ret
+;
+; RV64-VLEN256MAX-LABEL: vscale_masked:
+; RV64-VLEN256MAX: # %bb.0: # %entry
+; RV64-VLEN256MAX-NEXT: csrr a0, vlenb
+; RV64-VLEN256MAX-NEXT: srli a0, a0, 3
+; RV64-VLEN256MAX-NEXT: ret
+;
+; RV64-VLEN256EXACT-LABEL: vscale_masked:
+; RV64-VLEN256EXACT: # %bb.0: # %entry
+; RV64-VLEN256EXACT-NEXT: li a0, 4
+; RV64-VLEN256EXACT-NEXT: ret
 entry:
 %0 = call i64 @llvm.vscale.i64()
 %1 = and i64 %0, 511