[RISCV] Use ComputeNumSignBits/MaskedValueIsZero in RISCVDAGToDAGISel::selectSExti32/selectZExti32.

This helps us select W instructions in more cases. Most of the
affected tests have had the sign_extend_inreg or AND folded into
sextload/zextload.

Differential Revision: https://reviews.llvm.org/D104079
This commit is contained in:
Craig Topper 2021-06-10 17:05:34 -07:00
parent 7836d058c7
commit 420bd5ee8e
7 changed files with 22 additions and 28 deletions

View File

@@ -1338,14 +1338,8 @@ bool RISCVDAGToDAGISel::selectSExti32(SDValue N, SDValue &Val) {
Val = N.getOperand(0);
return true;
}
// FIXME: Should we just call computeNumSignBits here?
if (N.getOpcode() == ISD::AssertSext &&
cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
Val = N;
return true;
}
if (N.getOpcode() == ISD::AssertZext &&
cast<VTSDNode>(N->getOperand(1))->getVT().bitsLT(MVT::i32)) {
MVT VT = N.getSimpleValueType();
if (CurDAG->ComputeNumSignBits(N) > (VT.getSizeInBits() - 32)) {
Val = N;
return true;
}
@@ -1361,9 +1355,9 @@ bool RISCVDAGToDAGISel::selectZExti32(SDValue N, SDValue &Val) {
return true;
}
}
// FIXME: Should we just call computeKnownBits here?
if (N.getOpcode() == ISD::AssertZext &&
cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32)) {
MVT VT = N.getSimpleValueType();
APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(), 32);
if (CurDAG->MaskedValueIsZero(N, Mask)) {
Val = N;
return true;
}

View File

@@ -1029,7 +1029,7 @@ def : Pat<(riscv_fslw GPR:$rs3, GPR:$rs1, uimm5:$shamt),
let Predicates = [HasStdExtZbb, IsRV64] in {
def : PatGpr<riscv_clzw, CLZW>;
def : PatGpr<riscv_ctzw, CTZW>;
def : Pat<(i64 (ctpop (and GPR:$rs1, 0xFFFFFFFF))), (CPOPW GPR:$rs1)>;
def : Pat<(i64 (ctpop (i64 (zexti32 (i64 GPR:$rs1))))), (CPOPW GPR:$rs1)>;
} // Predicates = [HasStdExtZbb, IsRV64]
let Predicates = [HasStdExtZbp, IsRV64] in {

View File

@@ -206,7 +206,7 @@ define double @fcvt_d_w_load(i32* %p) nounwind {
; RV64IFD-LABEL: fcvt_d_w_load:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: lw a0, 0(a0)
; RV64IFD-NEXT: fcvt.d.l ft0, a0
; RV64IFD-NEXT: fcvt.d.w ft0, a0
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
%a = load i32, i32* %p
@@ -249,7 +249,7 @@ define double @fcvt_d_wu_load(i32* %p) nounwind {
; RV64IFD-LABEL: fcvt_d_wu_load:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: lwu a0, 0(a0)
; RV64IFD-NEXT: fcvt.d.lu ft0, a0
; RV64IFD-NEXT: fcvt.d.wu ft0, a0
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ret
%a = load i32, i32* %p

View File

@@ -207,7 +207,7 @@ define float @fcvt_s_w_load(i32* %p) nounwind {
; RV64IF-LABEL: fcvt_s_w_load:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lw a0, 0(a0)
; RV64IF-NEXT: fcvt.s.l ft0, a0
; RV64IF-NEXT: fcvt.s.w ft0, a0
; RV64IF-NEXT: fmv.x.w a0, ft0
; RV64IF-NEXT: ret
%a = load i32, i32* %p
@@ -242,7 +242,7 @@ define float @fcvt_s_wu_load(i32* %p) nounwind {
; RV64IF-LABEL: fcvt_s_wu_load:
; RV64IF: # %bb.0:
; RV64IF-NEXT: lwu a0, 0(a0)
; RV64IF-NEXT: fcvt.s.lu ft0, a0
; RV64IF-NEXT: fcvt.s.wu ft0, a0
; RV64IF-NEXT: fmv.x.w a0, ft0
; RV64IF-NEXT: ret
%a = load i32, i32* %p

View File

@@ -855,14 +855,14 @@ define half @fcvt_h_si(i16 %a) nounwind {
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: slli a0, a0, 48
; RV64IZFH-NEXT: srai a0, a0, 48
; RV64IZFH-NEXT: fcvt.h.l fa0, a0
; RV64IZFH-NEXT: fcvt.h.w fa0, a0
; RV64IZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_si:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: slli a0, a0, 48
; RV64IDZFH-NEXT: srai a0, a0, 48
; RV64IDZFH-NEXT: fcvt.h.l fa0, a0
; RV64IDZFH-NEXT: fcvt.h.w fa0, a0
; RV64IDZFH-NEXT: ret
%1 = sitofp i16 %a to half
ret half %1
@@ -914,7 +914,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV64IZFH-NEXT: lui a1, 16
; RV64IZFH-NEXT: addiw a1, a1, -1
; RV64IZFH-NEXT: and a0, a0, a1
; RV64IZFH-NEXT: fcvt.h.lu fa0, a0
; RV64IZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_ui:
@@ -922,7 +922,7 @@ define half @fcvt_h_ui(i16 %a) nounwind {
; RV64IDZFH-NEXT: lui a1, 16
; RV64IDZFH-NEXT: addiw a1, a1, -1
; RV64IDZFH-NEXT: and a0, a0, a1
; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0
; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IDZFH-NEXT: ret
%1 = uitofp i16 %a to half
ret half %1
@@ -992,13 +992,13 @@ define half @fcvt_h_w_load(i32* %p) nounwind {
; RV64IZFH-LABEL: fcvt_h_w_load:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: lw a0, 0(a0)
; RV64IZFH-NEXT: fcvt.h.l fa0, a0
; RV64IZFH-NEXT: fcvt.h.w fa0, a0
; RV64IZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_w_load:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: lw a0, 0(a0)
; RV64IDZFH-NEXT: fcvt.h.l fa0, a0
; RV64IDZFH-NEXT: fcvt.h.w fa0, a0
; RV64IDZFH-NEXT: ret
%a = load i32, i32* %p
%1 = sitofp i32 %a to half
@@ -1045,13 +1045,13 @@ define half @fcvt_h_wu_load(i32* %p) nounwind {
; RV64IZFH-LABEL: fcvt_h_wu_load:
; RV64IZFH: # %bb.0:
; RV64IZFH-NEXT: lwu a0, 0(a0)
; RV64IZFH-NEXT: fcvt.h.lu fa0, a0
; RV64IZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IZFH-NEXT: ret
;
; RV64IDZFH-LABEL: fcvt_h_wu_load:
; RV64IDZFH: # %bb.0:
; RV64IDZFH-NEXT: lwu a0, 0(a0)
; RV64IDZFH-NEXT: fcvt.h.lu fa0, a0
; RV64IDZFH-NEXT: fcvt.h.wu fa0, a0
; RV64IDZFH-NEXT: ret
%a = load i32, i32* %p
%1 = uitofp i32 %a to half

View File

@@ -232,7 +232,7 @@ define i8 @srem8(i8 %a, i8 %b) nounwind {
; RV64IM-NEXT: srai a1, a1, 56
; RV64IM-NEXT: slli a0, a0, 56
; RV64IM-NEXT: srai a0, a0, 56
; RV64IM-NEXT: rem a0, a0, a1
; RV64IM-NEXT: remw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i8 %a, %b
ret i8 %1
@@ -328,7 +328,7 @@ define i16 @srem16(i16 %a, i16 %b) nounwind {
; RV64IM-NEXT: srai a1, a1, 48
; RV64IM-NEXT: slli a0, a0, 48
; RV64IM-NEXT: srai a0, a0, 48
; RV64IM-NEXT: rem a0, a0, a1
; RV64IM-NEXT: remw a0, a0, a1
; RV64IM-NEXT: ret
%1 = srem i16 %a, %b
ret i16 %1

View File

@@ -1060,13 +1060,13 @@ define signext i32 @ctpop_i32_load(i32* %p) nounwind {
; RV64IB-LABEL: ctpop_i32_load:
; RV64IB: # %bb.0:
; RV64IB-NEXT: lwu a0, 0(a0)
; RV64IB-NEXT: cpop a0, a0
; RV64IB-NEXT: cpopw a0, a0
; RV64IB-NEXT: ret
;
; RV64IBB-LABEL: ctpop_i32_load:
; RV64IBB: # %bb.0:
; RV64IBB-NEXT: lwu a0, 0(a0)
; RV64IBB-NEXT: cpop a0, a0
; RV64IBB-NEXT: cpopw a0, a0
; RV64IBB-NEXT: ret
%a = load i32, i32* %p
%1 = call i32 @llvm.ctpop.i32(i32 %a)