[RISCV] Remove support for the unratified Zbt extension.

This extension does not appear to be on its way to ratification.

Out of the unratified bitmanip extensions, this one had the
largest impact on the compiler.

Posting this patch to start a discussion about whether we should
remove these extensions. We'll talk more at the RISC-V sync meeting this
Thursday.

Reviewed By: asb, reames

Differential Revision: https://reviews.llvm.org/D133834
Craig Topper 2022-09-20 20:26:48 -07:00
parent 669e508772
commit 70a64fe7b1
38 changed files with 367 additions and 4536 deletions


@ -69,12 +69,6 @@ TARGET_BUILTIN(__builtin_riscv_crc32c_w, "LiLi", "nc", "experimental-zbr")
TARGET_BUILTIN(__builtin_riscv_crc32_d, "LiLi", "nc", "experimental-zbr,64bit")
TARGET_BUILTIN(__builtin_riscv_crc32c_d, "LiLi", "nc", "experimental-zbr,64bit")
// Zbt extension
TARGET_BUILTIN(__builtin_riscv_fsl_32, "LiLiLiLi", "nc", "experimental-zbt")
TARGET_BUILTIN(__builtin_riscv_fsr_32, "LiLiLiLi", "nc", "experimental-zbt")
TARGET_BUILTIN(__builtin_riscv_fsl_64, "WiWiWiWi", "nc", "experimental-zbt,64bit")
TARGET_BUILTIN(__builtin_riscv_fsr_64, "WiWiWiWi", "nc", "experimental-zbt,64bit")
// Zbkb extension
TARGET_BUILTIN(__builtin_riscv_brev8, "LiLi", "nc", "zbkb")
TARGET_BUILTIN(__builtin_riscv_zip_32, "ZiZi", "nc", "zbkb,32bit")
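
For reference, the removed __builtin_riscv_fsl_32/__builtin_riscv_fsr_32 builtins took (rs1, rs3, shift amount) and mapped to the llvm.riscv.fsl/fsr intrinsics deleted later in this patch. Below is a minimal C sketch of the semantics as the compiler modeled them through ISD::FSHL/FSHR; the helper names are illustrative, and the 5-bit masking mirrors the lowering code rather than the raw instructions, which read log2(XLen)+1 shift-amount bits.

#include <stdint.h>

// fsl: funnel-shift left of the rs1:rs3 pair; returns rs1 for a shift of 0.
static uint32_t model_fsl32(uint32_t rs1, uint32_t rs3, uint32_t shamt) {
  uint32_t s = shamt & 31;                 // keep log2(32) bits, as the FSHL lowering did
  if (s == 0)
    return rs1;
  return (rs1 << s) | (rs3 >> (32 - s));
}

// fsr: funnel-shift right; also returns rs1 for a shift of 0.
static uint32_t model_fsr32(uint32_t rs1, uint32_t rs3, uint32_t shamt) {
  uint32_t s = shamt & 31;
  if (s == 0)
    return rs1;
  return (rs1 >> s) | (rs3 << (32 - s));
}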


@ -19212,10 +19212,6 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_crc32c_h:
case RISCV::BI__builtin_riscv_crc32c_w:
case RISCV::BI__builtin_riscv_crc32c_d:
case RISCV::BI__builtin_riscv_fsl_32:
case RISCV::BI__builtin_riscv_fsr_32:
case RISCV::BI__builtin_riscv_fsl_64:
case RISCV::BI__builtin_riscv_fsr_64:
case RISCV::BI__builtin_riscv_brev8:
case RISCV::BI__builtin_riscv_zip_32:
case RISCV::BI__builtin_riscv_unzip_32: {
@ -19320,16 +19316,6 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
ID = Intrinsic::riscv_crc32c_d;
break;
// Zbt
case RISCV::BI__builtin_riscv_fsl_32:
case RISCV::BI__builtin_riscv_fsl_64:
ID = Intrinsic::riscv_fsl;
break;
case RISCV::BI__builtin_riscv_fsr_32:
case RISCV::BI__builtin_riscv_fsr_64:
ID = Intrinsic::riscv_fsr;
break;
// Zbkx
case RISCV::BI__builtin_riscv_xperm8:
ID = Intrinsic::riscv_xperm8;


@ -1,54 +0,0 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -triple riscv32 -target-feature +experimental-zbt -emit-llvm %s -o - \
// RUN: | FileCheck %s -check-prefix=RV32ZBT
// RV32ZBT-LABEL: @fsl(
// RV32ZBT-NEXT: entry:
// RV32ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT: store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT: store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP3:%.*]] = call i32 @llvm.riscv.fsl.i32(i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
// RV32ZBT-NEXT: ret i32 [[TMP3]]
//
int fsl(int rs1, int rs2, int rs3) {
return __builtin_riscv_fsl_32(rs1, rs2, rs3);
}
// RV32ZBT-LABEL: @fsr(
// RV32ZBT-NEXT: entry:
// RV32ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT: store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT: store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP3:%.*]] = call i32 @llvm.riscv.fsr.i32(i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
// RV32ZBT-NEXT: ret i32 [[TMP3]]
//
int fsr(int rs1, int rs2, int rs3) {
return __builtin_riscv_fsr_32(rs1, rs2, rs3);
}
// RV32ZBT-LABEL: @fsri(
// RV32ZBT-NEXT: entry:
// RV32ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT: store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT: store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT: [[TMP2:%.*]] = call i32 @llvm.riscv.fsr.i32(i32 [[TMP0]], i32 [[TMP1]], i32 15)
// RV32ZBT-NEXT: ret i32 [[TMP2]]
//
int fsri(int rs1, int rs2) {
return __builtin_riscv_fsr_32(rs1, rs2, 15);
}


@ -1,116 +0,0 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -no-opaque-pointers -triple riscv64 -target-feature +experimental-zbt -emit-llvm %s -o - \
// RUN: | FileCheck %s -check-prefix=RV64ZBT
// RV64ZBT-LABEL: @fsl(
// RV64ZBT-NEXT: entry:
// RV64ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT: store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT: store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT: [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// RV64ZBT-NEXT: [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// RV64ZBT-NEXT: [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV2:%.*]] = sext i32 [[TMP2]] to i64
// RV64ZBT-NEXT: [[TMP3:%.*]] = call i64 @llvm.riscv.fsl.i64(i64 [[CONV]], i64 [[CONV1]], i64 [[CONV2]])
// RV64ZBT-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP3]] to i32
// RV64ZBT-NEXT: ret i32 [[CONV3]]
//
int fsl(int rs1, int rs2, int rs3) {
return __builtin_riscv_fsl_32(rs1, rs2, rs3);
}
// RV64ZBT-LABEL: @fsr(
// RV64ZBT-NEXT: entry:
// RV64ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT: store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT: store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT: [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// RV64ZBT-NEXT: [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// RV64ZBT-NEXT: [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV2:%.*]] = sext i32 [[TMP2]] to i64
// RV64ZBT-NEXT: [[TMP3:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[CONV]], i64 [[CONV1]], i64 [[CONV2]])
// RV64ZBT-NEXT: [[CONV3:%.*]] = trunc i64 [[TMP3]] to i32
// RV64ZBT-NEXT: ret i32 [[CONV3]]
//
int fsr(int rs1, int rs2, int rs3) {
return __builtin_riscv_fsr_32(rs1, rs2, rs3);
}
// RV64ZBT-LABEL: @fsri(
// RV64ZBT-NEXT: entry:
// RV64ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT: store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT: store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT: [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// RV64ZBT-NEXT: [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT: [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// RV64ZBT-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[CONV]], i64 [[CONV1]], i64 15)
// RV64ZBT-NEXT: [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// RV64ZBT-NEXT: ret i32 [[CONV2]]
//
int fsri(int rs1, int rs2) {
return __builtin_riscv_fsr_32(rs1, rs2, 15);
}
// RV64ZBT-LABEL: @fslw(
// RV64ZBT-NEXT: entry:
// RV64ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: [[RS3_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT: store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT: store i64 [[RS3:%.*]], i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP2:%.*]] = load i64, i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP3:%.*]] = call i64 @llvm.riscv.fsl.i64(i64 [[TMP0]], i64 [[TMP1]], i64 [[TMP2]])
// RV64ZBT-NEXT: ret i64 [[TMP3]]
//
long fslw(long rs1, long rs2, long rs3) {
return __builtin_riscv_fsl_64(rs1, rs2, rs3);
}
// RV64ZBT-LABEL: @fsrw(
// RV64ZBT-NEXT: entry:
// RV64ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: [[RS3_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT: store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT: store i64 [[RS3:%.*]], i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP2:%.*]] = load i64, i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP3:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[TMP0]], i64 [[TMP1]], i64 [[TMP2]])
// RV64ZBT-NEXT: ret i64 [[TMP3]]
//
long fsrw(long rs1, long rs2, long rs3) {
return __builtin_riscv_fsr_64(rs1, rs2, rs3);
}
// RV64ZBT-LABEL: @fsriw(
// RV64ZBT-NEXT: entry:
// RV64ZBT-NEXT: [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT: store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT: store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT: [[TMP2:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[TMP0]], i64 [[TMP1]], i64 15)
// RV64ZBT-NEXT: ret i64 [[TMP2]]
//
long fsriw(long rs1, long rs2) {
return __builtin_riscv_fsr_64(rs1, rs2, 15);
}


@ -131,7 +131,7 @@ The primary goal of experimental support is to assist in the process of ratifica
``experimental-zawrs``
LLVM implements the `1.0-rc3 draft specification <https://github.com/riscv/riscv-zawrs/releases/download/V1.0-rc3/Zawrs.pdf>`_. Note that there have been backwards-incompatible changes made between release candidates for the 1.0 draft.
``experimental-zbe``, ``experimental-zbf``, ``experimental-zbm``, ``experimental-zbp``, ``experimental-zbr``, ``experimental-zbt``
``experimental-zbe``, ``experimental-zbf``, ``experimental-zbm``, ``experimental-zbp``, ``experimental-zbr``
LLVM implements the `latest state of the bitmanip working branch <https://github.com/riscv/riscv-bitmanip/tree/main-history>`_, which is largely similar to the 0.93 draft specification but with some instruction naming changes. These are individual portions of the bitmanip efforts which did *not* get ratified. Given that ratification for these sub-extensions appears stalled, they are likely candidates for removal in the future.
``experimental-zca``


@ -112,6 +112,8 @@ Changes to the PowerPC Backend
Changes to the RISC-V Backend
-----------------------------
* Support for the unratified Zbt extension has been removed.
Changes to the WebAssembly Backend
----------------------------------


@ -129,10 +129,6 @@ let TargetPrefix = "riscv" in {
def int_riscv_crc32c_w : BitManipGPRIntrinsics;
def int_riscv_crc32c_d : BitManipGPRIntrinsics;
// Zbt
def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
// Zbkb
def int_riscv_brev8 : BitManipGPRIntrinsics;
def int_riscv_zip : BitManipGPRIntrinsics;


@ -198,13 +198,6 @@ def HasStdExtZbs : Predicate<"Subtarget->hasStdExtZbs()">,
AssemblerPredicate<(all_of FeatureStdExtZbs),
"'Zbs' (Single-Bit Instructions)">;
def FeatureStdExtZbt
: SubtargetFeature<"experimental-zbt", "HasStdExtZbt", "true",
"'Zbt' (Ternary 'Zb' Instructions)">;
def HasStdExtZbt : Predicate<"Subtarget->hasStdExtZbt()">,
AssemblerPredicate<(all_of FeatureStdExtZbt),
"'Zbt' (Ternary 'Zb' Instructions)">;
// Some instructions belong to both the basic and the permutation
// subextensions. They should be enabled if either has been specified.
def HasStdExtZbbOrZbp


@ -301,15 +301,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setOperationAction(ISD::ABS, MVT::i32, Custom);
}
if (Subtarget.hasStdExtZbt()) {
setOperationAction({ISD::FSHL, ISD::FSHR}, XLenVT, Custom);
setOperationAction(ISD::SELECT, XLenVT, Legal);
if (Subtarget.is64Bit())
setOperationAction({ISD::FSHL, ISD::FSHR}, MVT::i32, Custom);
} else {
setOperationAction(ISD::SELECT, XLenVT, Custom);
}
setOperationAction(ISD::SELECT, XLenVT, Custom);
static const unsigned FPLegalNodeTypes[] = {
ISD::FMINNUM, ISD::FMAXNUM, ISD::LRINT,
@ -3398,31 +3390,6 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
return DAG.getNode(RISCVISD::GREV, DL, VT, BSwap,
DAG.getConstant(7, DL, VT));
}
case ISD::FSHL:
case ISD::FSHR: {
MVT VT = Op.getSimpleValueType();
assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
SDLoc DL(Op);
// FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
// use log2(XLen) bits. Mask the shift amount accordingly to prevent
// accidentally setting the extra bit.
unsigned ShAmtWidth = Subtarget.getXLen() - 1;
SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
DAG.getConstant(ShAmtWidth, DL, VT));
// fshl and fshr concatenate their operands in the same order. fsr and fsl
// instructions use different orders. fshl will return its first operand for
// shift of zero, fshr will return its second operand. fsl and fsr both
// return rs1 so the ISD nodes need to have different operand orders.
// Shift amount is in rs2.
SDValue Op0 = Op.getOperand(0);
SDValue Op1 = Op.getOperand(1);
unsigned Opc = RISCVISD::FSL;
if (Op.getOpcode() == ISD::FSHR) {
std::swap(Op0, Op1);
Opc = RISCVISD::FSR;
}
return DAG.getNode(Opc, DL, VT, Op0, Op1, ShAmt);
}
case ISD::TRUNCATE:
// Only custom-lower vector truncates
if (!Op.getSimpleValueType().isVector())
@ -5150,12 +5117,6 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case Intrinsic::riscv_bfp:
return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2));
case Intrinsic::riscv_fsl:
return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
case Intrinsic::riscv_fsr:
return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
Op.getOperand(2), Op.getOperand(3));
case Intrinsic::riscv_vmv_x_s:
assert(Op.getValueType() == XLenVT && "Unexpected VT!");
return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
@ -7115,10 +7076,6 @@ static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
return RISCVISD::BDECOMPRESSW;
case Intrinsic::riscv_bfp:
return RISCVISD::BFPW;
case Intrinsic::riscv_fsl:
return RISCVISD::FSLW;
case Intrinsic::riscv_fsr:
return RISCVISD::FSRW;
}
}
@ -7554,34 +7511,6 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
break;
}
case ISD::FSHL:
case ISD::FSHR: {
assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
SDValue NewOp0 =
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
SDValue NewOp1 =
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
SDValue NewShAmt =
DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
// FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
// Mask the shift amount to 5 bits to prevent accidentally setting bit 5.
NewShAmt = DAG.getNode(ISD::AND, DL, MVT::i64, NewShAmt,
DAG.getConstant(0x1f, DL, MVT::i64));
// fshl and fshr concatenate their operands in the same order. fsrw and fslw
// instructions use different orders. fshl will return its first operand for
// shift of zero, fshr will return its second operand. fsl and fsr both
// return rs1 so the ISD nodes need to have different operand orders.
// Shift amount is in rs2.
unsigned Opc = RISCVISD::FSLW;
if (N->getOpcode() == ISD::FSHR) {
std::swap(NewOp0, NewOp1);
Opc = RISCVISD::FSRW;
}
SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewShAmt);
Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
break;
}
case ISD::EXTRACT_VECTOR_ELT: {
// Custom-legalize an EXTRACT_VECTOR_ELT where XLEN<SEW, as the SEW element
// type is illegal (currently only vXi64 RV32).
@ -7671,9 +7600,7 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
}
case Intrinsic::riscv_bcompress:
case Intrinsic::riscv_bdecompress:
case Intrinsic::riscv_bfp:
case Intrinsic::riscv_fsl:
case Intrinsic::riscv_fsr: {
case Intrinsic::riscv_bfp: {
assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
"Unexpected custom legalisation");
Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
@ -9524,21 +9451,6 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
break;
}
case RISCVISD::FSR:
case RISCVISD::FSL:
case RISCVISD::FSRW:
case RISCVISD::FSLW: {
bool IsWInstruction =
N->getOpcode() == RISCVISD::FSRW || N->getOpcode() == RISCVISD::FSLW;
unsigned BitWidth =
IsWInstruction ? 32 : N->getSimpleValueType(0).getSizeInBits();
assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
// Only the lower log2(BitWidth)+1 bits of the shift amount are read.
if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) + 1))
return SDValue(N, 0);
break;
}
case RISCVISD::FMV_X_ANYEXTH:
case RISCVISD::FMV_X_ANYEXTW_RV64: {
SDLoc DL(N);
@ -10232,8 +10144,6 @@ unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
case RISCVISD::RORW:
case RISCVISD::GREVW:
case RISCVISD::GORCW:
case RISCVISD::FSLW:
case RISCVISD::FSRW:
case RISCVISD::SHFLW:
case RISCVISD::UNSHFLW:
case RISCVISD::BCOMPRESSW:
@ -12290,10 +12200,6 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(RORW)
NODE_NAME_CASE(CLZW)
NODE_NAME_CASE(CTZW)
NODE_NAME_CASE(FSLW)
NODE_NAME_CASE(FSRW)
NODE_NAME_CASE(FSL)
NODE_NAME_CASE(FSR)
NODE_NAME_CASE(FMV_H_X)
NODE_NAME_CASE(FMV_X_ANYEXTH)
NODE_NAME_CASE(FMV_X_SIGNEXTH)
@ -13158,60 +13064,6 @@ SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
return SDValue();
}
SDValue
RISCVTargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const {
AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
if (isIntDivCheap(N->getValueType(0), Attr))
return SDValue(N, 0); // Lower SDIV as SDIV
assert((Divisor.isPowerOf2() || Divisor.isNegatedPowerOf2()) &&
"Unexpected divisor!");
// Conditional move is needed, so do the transformation iff Zbt is enabled.
if (!Subtarget.hasStdExtZbt())
return SDValue();
// When |Divisor| >= 2^12, it isn't profitable to do such a transformation.
// Besides, more critical path instructions will be generated when dividing
// by 2. So we keep using the original DAGs for these cases.
unsigned Lg2 = Divisor.countTrailingZeros();
if (Lg2 == 1 || Lg2 >= 12)
return SDValue();
// fold (sdiv X, pow2)
EVT VT = N->getValueType(0);
if (VT != MVT::i32 && !(Subtarget.is64Bit() && VT == MVT::i64))
return SDValue();
SDLoc DL(N);
SDValue N0 = N->getOperand(0);
SDValue Zero = DAG.getConstant(0, DL, VT);
SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
// Add (N0 < 0) ? Pow2 - 1 : 0;
SDValue Cmp = DAG.getSetCC(DL, VT, N0, Zero, ISD::SETLT);
SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
SDValue Sel = DAG.getNode(ISD::SELECT, DL, VT, Cmp, Add, N0);
Created.push_back(Cmp.getNode());
Created.push_back(Add.getNode());
Created.push_back(Sel.getNode());
// Divide by pow2.
SDValue SRA =
DAG.getNode(ISD::SRA, DL, VT, Sel, DAG.getConstant(Lg2, DL, VT));
// If we're dividing by a positive value, we're done. Otherwise, we must
// negate the result.
if (Divisor.isNonNegative())
return SRA;
Created.push_back(SRA.getNode());
return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
}
bool RISCVTargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
// When aggressively optimizing for code size, we prefer to use a div
// instruction, as it is usually smaller than the alternative sequence.
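
The deleted BuildSDIVPow2 hook is the piece of this patch that touched generic lowering rather than Zbt-specific code: with cmov available, a signed divide by a power of two could be done branchlessly by biasing negative dividends and shifting, skipping the transform for |divisor| of 2 or >= 2^12 as the comments above explain. A minimal C sketch of the computation, assuming arithmetic right shift for negative values (helper name illustrative):

#include <stdint.h>

// sdiv x, 2^lg2 with round-toward-zero semantics: add (2^lg2 - 1) when x is
// negative (the select became a Zbt cmov), then arithmetic-shift right.
static int32_t sdiv_pow2(int32_t x, unsigned lg2) {
  int32_t bias = (int32_t)((1u << lg2) - 1);
  int32_t sel = (x < 0) ? x + bias : x;  // slti + addi + cmov
  return sel >> lg2;                     // srai; negate afterwards for a negative divisor
}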


@ -77,14 +77,6 @@ enum NodeType : unsigned {
// named RISC-V instructions.
CLZW,
CTZW,
// RV64IB/RV32IB funnel shifts, with the semantics of the named RISC-V
// instructions. Operand order is rs1, rs3, rs2/shamt.
FSR,
FSL,
// RV64IB funnel shifts, with the semantics of the named RISC-V instructions.
// Operand order is rs1, rs3, rs2/shamt.
FSRW,
FSLW,
// FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
// XLEN is the only legal integer width.
//
@ -601,9 +593,6 @@ public:
bool shouldConvertFpToSat(unsigned Op, EVT FPVT, EVT VT) const override;
SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
SmallVectorImpl<SDNode *> &Created) const override;
unsigned getJumpTableEncoding() const override;
const MCExpr *LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,


@ -17,7 +17,6 @@
// Zbm - 0.93 *experimental
// Zbp - 0.93 *experimental
// Zbr - 0.93 *experimental
// Zbt - 0.93 *experimental
//
// The experimental extensions appeared in an earlier draft of the Bitmanip
// extensions. They are not ratified and subject to change.
@ -39,10 +38,6 @@ def riscv_clzw : SDNode<"RISCVISD::CLZW", SDT_RISCVIntUnaryOpW>;
def riscv_ctzw : SDNode<"RISCVISD::CTZW", SDT_RISCVIntUnaryOpW>;
def riscv_rolw : SDNode<"RISCVISD::ROLW", SDT_RISCVIntBinOpW>;
def riscv_rorw : SDNode<"RISCVISD::RORW", SDT_RISCVIntBinOpW>;
def riscv_fslw : SDNode<"RISCVISD::FSLW", SDT_RISCVIntShiftDOpW>;
def riscv_fsrw : SDNode<"RISCVISD::FSRW", SDT_RISCVIntShiftDOpW>;
def riscv_fsl : SDNode<"RISCVISD::FSL", SDTIntShiftDOp>;
def riscv_fsr : SDNode<"RISCVISD::FSR", SDTIntShiftDOp>;
def riscv_grev : SDNode<"RISCVISD::GREV", SDTIntBinOp>;
def riscv_grevw : SDNode<"RISCVISD::GREVW", SDT_RISCVIntBinOpW>;
def riscv_gorc : SDNode<"RISCVISD::GORC", SDTIntBinOp>;
@ -315,46 +310,6 @@ class RVBTernaryR<bits<2> funct2, bits<3> funct3, RISCVOpcode opcode,
: RVInstR4<funct2, funct3, opcode, (outs GPR:$rd),
(ins GPR:$rs1, GPR:$rs2, GPR:$rs3), opcodestr, argstr>;
// Currently used by FSRI only
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBTernaryImm6<bits<3> funct3, RISCVOpcode opcode,
string opcodestr, string argstr>
: RVInst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt),
opcodestr, argstr, [], InstFormatR4> {
bits<5> rs3;
bits<6> shamt;
bits<5> rs1;
bits<5> rd;
let Inst{31-27} = rs3;
let Inst{26} = 1;
let Inst{25-20} = shamt;
let Inst{19-15} = rs1;
let Inst{14-12} = funct3;
let Inst{11-7} = rd;
let Opcode = opcode.Value;
}
// Currently used by FSRIW only
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class RVBTernaryImm5<bits<2> funct2, bits<3> funct3, RISCVOpcode opcode,
string opcodestr, string argstr>
: RVInst<(outs GPR:$rd), (ins GPR:$rs1, GPR:$rs3, uimm5:$shamt),
opcodestr, argstr, [], InstFormatR4> {
bits<5> rs3;
bits<5> shamt;
bits<5> rs1;
bits<5> rd;
let Inst{31-27} = rs3;
let Inst{26-25} = funct2;
let Inst{24-20} = shamt;
let Inst{19-15} = rs1;
let Inst{14-12} = funct3;
let Inst{11-7} = rd;
let Opcode = opcode.Value;
}
//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//
@ -483,32 +438,6 @@ def XPERM4 : ALU_rr<0b0010100, 0b010, "xperm4">, Sched<[]>;
def XPERM8 : ALU_rr<0b0010100, 0b100, "xperm8">, Sched<[]>;
} // Predicates = [HasStdExtZbpOrZbkx]
let Predicates = [HasStdExtZbt] in {
def CMIX : RVBTernaryR<0b11, 0b001, OPC_OP, "cmix", "$rd, $rs2, $rs1, $rs3">,
Sched<[WriteCMix, ReadCMix, ReadCMix, ReadCMix]>;
def CMOV : RVBTernaryR<0b11, 0b101, OPC_OP, "cmov", "$rd, $rs2, $rs1, $rs3">,
Sched<[WriteCMov, ReadCMov, ReadCMov, ReadCMov]>;
def FSL : RVBTernaryR<0b10, 0b001, OPC_OP, "fsl", "$rd, $rs1, $rs3, $rs2">,
Sched<[WriteFSReg, ReadFSReg, ReadFSReg, ReadFSReg]>;
def FSR : RVBTernaryR<0b10, 0b101, OPC_OP, "fsr", "$rd, $rs1, $rs3, $rs2">,
Sched<[WriteFSReg, ReadFSReg, ReadFSReg, ReadFSReg]>;
def FSRI : RVBTernaryImm6<0b101, OPC_OP_IMM, "fsri",
"$rd, $rs1, $rs3, $shamt">,
Sched<[WriteFSRImm, ReadFSRImm, ReadFSRImm]>;
} // Predicates = [HasStdExtZbt]
let Predicates = [HasStdExtZbt, IsRV64] in {
def FSLW : RVBTernaryR<0b10, 0b001, OPC_OP_32,
"fslw", "$rd, $rs1, $rs3, $rs2">,
Sched<[WriteFSReg32, ReadFSReg32, ReadFSReg32, ReadFSReg32]>;
def FSRW : RVBTernaryR<0b10, 0b101, OPC_OP_32, "fsrw",
"$rd, $rs1, $rs3, $rs2">,
Sched<[WriteFSReg32, ReadFSReg32, ReadFSReg32, ReadFSReg32]>;
def FSRIW : RVBTernaryImm5<0b10, 0b101, OPC_OP_IMM_32,
"fsriw", "$rd, $rs1, $rs3, $shamt">,
Sched<[WriteFSRImm32, ReadFSRImm32, ReadFSRImm32]>;
} // Predicates = [HasStdExtZbt, IsRV64]
let Predicates = [HasStdExtZbb] in {
def CLZ : RVBUnary<0b0110000, 0b00000, 0b001, OPC_OP_IMM, "clz">,
Sched<[WriteCLZ, ReadCLZ]>;
@ -981,55 +910,6 @@ let Predicates = [HasStdExtZbp, IsRV64] in {
def : Pat<(i64 (riscv_grev GPR:$rs1, 56)), (REV8_RV64 GPR:$rs1)>;
} // Predicates = [HasStdExtZbp, IsRV64]
let Predicates = [HasStdExtZbt] in {
def : Pat<(or (and (not GPR:$rs2), GPR:$rs3), (and GPR:$rs2, GPR:$rs1)),
(CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(xor (and (xor GPR:$rs1, GPR:$rs3), GPR:$rs2), GPR:$rs3),
(CMIX GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(select (XLenVT (setne GPR:$rs2, 0)), GPR:$rs1, GPR:$rs3),
(CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(select (XLenVT (seteq GPR:$rs2, 0)), GPR:$rs3, GPR:$rs1),
(CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(select (XLenVT (setne GPR:$x, simm12_plus1:$y)), GPR:$rs1, GPR:$rs3),
(CMOV GPR:$rs1, (ADDI GPR:$x, (NegImm simm12_plus1:$y)), GPR:$rs3)>;
def : Pat<(select (XLenVT (seteq GPR:$x, simm12_plus1:$y)), GPR:$rs3, GPR:$rs1),
(CMOV GPR:$rs1, (ADDI GPR:$x, (NegImm simm12_plus1:$y)), GPR:$rs3)>;
def : Pat<(select (XLenVT (setne GPR:$x, GPR:$y)), GPR:$rs1, GPR:$rs3),
(CMOV GPR:$rs1, (XOR GPR:$x, GPR:$y), GPR:$rs3)>;
def : Pat<(select (XLenVT (seteq GPR:$x, GPR:$y)), GPR:$rs3, GPR:$rs1),
(CMOV GPR:$rs1, (XOR GPR:$x, GPR:$y), GPR:$rs3)>;
def : Pat<(select GPR:$rs2, GPR:$rs1, GPR:$rs3),
(CMOV GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
} // Predicates = [HasStdExtZbt]
let Predicates = [HasStdExtZbt] in {
def : Pat<(riscv_fsl GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSL GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(riscv_fsr GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSR GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(riscv_fsr GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt),
(FSRI GPR:$rs1, GPR:$rs3, uimmlog2xlen:$shamt)>;
// We can use FSRI for FSL by immediate if we subtract the immediate from
// XLen and swap the operands.
def : Pat<(riscv_fsl GPR:$rs3, GPR:$rs1, uimmlog2xlen:$shamt),
(FSRI GPR:$rs1, GPR:$rs3, (ImmSubFromXLen uimmlog2xlen:$shamt))>;
} // Predicates = [HasStdExtZbt]
let Predicates = [HasStdExtZbt, IsRV64] in {
def : Pat<(riscv_fslw GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSLW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(riscv_fsrw GPR:$rs1, GPR:$rs3, GPR:$rs2),
(FSRW GPR:$rs1, GPR:$rs2, GPR:$rs3)>;
def : Pat<(riscv_fsrw GPR:$rs1, GPR:$rs3, uimm5:$shamt),
(FSRIW GPR:$rs1, GPR:$rs3, uimm5:$shamt)>;
// We can use FSRIW for FSLW by immediate if we subtract the immediate from
// 32 and swap the operands.
def : Pat<(riscv_fslw GPR:$rs3, GPR:$rs1, uimm5:$shamt),
(FSRIW GPR:$rs1, GPR:$rs3, (ImmSubFrom32 uimm5:$shamt))>;
} // Predicates = [HasStdExtZbt, IsRV64]
let Predicates = [HasStdExtZbb] in {
def : PatGpr<ctlz, CLZ>;
def : PatGpr<cttz, CTZ>;
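
The removed select-lowering patterns above reduce to two simple scalar identities, sketched in C below (function names illustrative): cmix is a bitwise select under the mask in rs2, and cmov is a whole-register select on rs2 being nonzero. The FSRI-for-FSL patterns rely on the related identity that a left funnel shift by imm equals a right funnel shift by XLEN - imm with the register operands swapped.

#include <stdint.h>

// cmix rd, rs2, rs1, rs3: bits of rs1 where mask rs2 is set, bits of rs3
// where it is clear -- the (or (and (not rs2), rs3), (and rs2, rs1)) pattern.
static uint32_t model_cmix(uint32_t rs1, uint32_t rs2, uint32_t rs3) {
  return (rs1 & rs2) | (rs3 & ~rs2);
}

// cmov rd, rs2, rs1, rs3: rs1 if rs2 is nonzero, otherwise rs3 -- the
// (select (setne rs2, 0), rs1, rs3) pattern.
static uint32_t model_cmov(uint32_t rs1, uint32_t rs2, uint32_t rs3) {
  return rs2 != 0 ? rs1 : rs3;
}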


@ -248,6 +248,5 @@ defm : UnsupportedSchedZbf;
defm : UnsupportedSchedZbm;
defm : UnsupportedSchedZbp;
defm : UnsupportedSchedZbr;
defm : UnsupportedSchedZbt;
defm : UnsupportedSchedZfh;
}


@ -235,6 +235,5 @@ defm : UnsupportedSchedZbf;
defm : UnsupportedSchedZbm;
defm : UnsupportedSchedZbp;
defm : UnsupportedSchedZbr;
defm : UnsupportedSchedZbt;
defm : UnsupportedSchedZfh;
}


@ -78,14 +78,6 @@ def WriteCRCCH : SchedWrite; // crc32c.h
def WriteCRCCW : SchedWrite; // crc32c.w
def WriteCRCCD : SchedWrite; // crc32c.d
// Zbt extension
def WriteCMix : SchedWrite; // cmix
def WriteCMov : SchedWrite; // cmov
def WriteFSReg : SchedWrite; // fsl/fsr
def WriteFSRImm : SchedWrite; // fsri
def WriteFSReg32 : SchedWrite; // fslw/fsrw
def WriteFSRImm32 : SchedWrite; // fsriw
/// Define scheduler resources associated with use operands.
// Zba extension
@ -158,14 +150,6 @@ def ReadCRCCH : SchedRead; // crc32c.h
def ReadCRCCW : SchedRead; // crc32c.w
def ReadCRCCD : SchedRead; // crc32c.d
// Zbt extension
def ReadCMix : SchedRead; // cmix
def ReadCMov : SchedRead; // cmov
def ReadFSReg : SchedRead; // fsl/fsr
def ReadFSRImm : SchedRead; // fsri
def ReadFSReg32 : SchedRead; // fslw/fsrw
def ReadFSRImm32 : SchedRead; // fsriw
/// Define default scheduler resources for B.
multiclass UnsupportedSchedZba {
@ -325,21 +309,3 @@ def : ReadAdvance<ReadCRCCW, 0>;
def : ReadAdvance<ReadCRCCD, 0>;
}
}
multiclass UnsupportedSchedZbt {
let Unsupported = true in {
def : WriteRes<WriteCMix, []>;
def : WriteRes<WriteCMov, []>;
def : WriteRes<WriteFSReg, []>;
def : WriteRes<WriteFSRImm, []>;
def : WriteRes<WriteFSReg32, []>;
def : WriteRes<WriteFSRImm32, []>;
def : ReadAdvance<ReadCMix, 0>;
def : ReadAdvance<ReadCMov, 0>;
def : ReadAdvance<ReadFSReg, 0>;
def : ReadAdvance<ReadFSRImm, 0>;
def : ReadAdvance<ReadFSReg32, 0>;
def : ReadAdvance<ReadFSRImm32, 0>;
}
}


@ -60,7 +60,6 @@ private:
bool HasStdExtZbp = false;
bool HasStdExtZbr = false;
bool HasStdExtZbs = false;
bool HasStdExtZbt = false;
bool HasStdExtZca = false;
bool HasStdExtV = false;
bool HasStdExtZve32x = false;
@ -171,7 +170,6 @@ public:
bool hasStdExtZbp() const { return HasStdExtZbp; }
bool hasStdExtZbr() const { return HasStdExtZbr; }
bool hasStdExtZbs() const { return HasStdExtZbs; }
bool hasStdExtZbt() const { return HasStdExtZbt; }
bool hasStdExtZca() const { return HasStdExtZca; }
bool hasStdExtZvl() const { return ZvlLen != 0; }
bool hasStdExtZvfh() const { return HasStdExtZvfh; }


@ -20,7 +20,6 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbp %s -o - | FileCheck --check-prefix=RV32ZBP %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbr %s -o - | FileCheck --check-prefix=RV32ZBR %s
; RUN: llc -mtriple=riscv32 -mattr=+zbs %s -o - | FileCheck --check-prefix=RV32ZBS %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt %s -o - | FileCheck --check-prefix=RV32ZBT %s
; RUN: llc -mtriple=riscv32 -mattr=+v %s -o - | FileCheck --check-prefix=RV32V %s
; RUN: llc -mtriple=riscv32 -mattr=+zbb,+zfh,+v,+f %s -o - | FileCheck --check-prefix=RV32COMBINED %s
; RUN: llc -mtriple=riscv32 -mattr=+zbkb %s -o - | FileCheck --check-prefix=RV32ZBKB %s
@ -62,7 +61,6 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbp %s -o - | FileCheck --check-prefix=RV64ZBP %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbr %s -o - | FileCheck --check-prefix=RV64ZBR %s
; RUN: llc -mtriple=riscv64 -mattr=+zbs %s -o - | FileCheck --check-prefix=RV64ZBS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt %s -o - | FileCheck --check-prefix=RV64ZBT %s
; RUN: llc -mtriple=riscv64 -mattr=+v %s -o - | FileCheck --check-prefix=RV64V %s
; RUN: llc -mtriple=riscv64 -mattr=+zbb,+zfh,+v,+f %s -o - | FileCheck --check-prefix=RV64COMBINED %s
; RUN: llc -mtriple=riscv64 -mattr=+zbkb %s -o - | FileCheck --check-prefix=RV64ZBKB %s
@ -107,7 +105,6 @@
; RV32ZBP: .attribute 5, "rv32i2p0_zbp0p93"
; RV32ZBR: .attribute 5, "rv32i2p0_zbr0p93"
; RV32ZBS: .attribute 5, "rv32i2p0_zbs1p0"
; RV32ZBT: .attribute 5, "rv32i2p0_zbt0p93"
; RV32V: .attribute 5, "rv32i2p0_f2p0_d2p0_v1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"
; RV32COMBINED: .attribute 5, "rv32i2p0_f2p0_d2p0_v1p0_zfh1p0_zbb1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"
; RV32ZBKB: .attribute 5, "rv32i2p0_zbkb1p0"
@ -150,7 +147,6 @@
; RV64ZBP: .attribute 5, "rv64i2p0_zbp0p93"
; RV64ZBR: .attribute 5, "rv64i2p0_zbr0p93"
; RV64ZBS: .attribute 5, "rv64i2p0_zbs1p0"
; RV64ZBT: .attribute 5, "rv64i2p0_zbt0p93"
; RV64V: .attribute 5, "rv64i2p0_f2p0_d2p0_v1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"
; RV64COMBINED: .attribute 5, "rv64i2p0_f2p0_d2p0_v1p0_zfh1p0_zbb1p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0"
; RV64ZBKB: .attribute 5, "rv64i2p0_zbkb1p0"


@ -1,48 +1,44 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=RV32,RV32I
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=RV32,RV32ZBT
; RUN: | FileCheck %s -check-prefixes=RV32I
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=RV64,RV64I
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefixes=RV64,RV64ZBT
; RUN: | FileCheck %s -check-prefixes=RV64I
define i32 @sdiv32_pow2_2(i32 %a) {
; RV32-LABEL: sdiv32_pow2_2:
; RV32: # %bb.0: # %entry
; RV32-NEXT: srli a1, a0, 31
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: srai a0, a0, 1
; RV32-NEXT: ret
; RV32I-LABEL: sdiv32_pow2_2:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: srli a1, a0, 31
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srai a0, a0, 1
; RV32I-NEXT: ret
;
; RV64-LABEL: sdiv32_pow2_2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srliw a1, a0, 31
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: sraiw a0, a0, 1
; RV64-NEXT: ret
; RV64I-LABEL: sdiv32_pow2_2:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srliw a1, a0, 31
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 1
; RV64I-NEXT: ret
entry:
%div = sdiv i32 %a, 2
ret i32 %div
}
define i32 @sdiv32_pow2_negative_2(i32 %a) {
; RV32-LABEL: sdiv32_pow2_negative_2:
; RV32: # %bb.0: # %entry
; RV32-NEXT: srli a1, a0, 31
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: srai a0, a0, 1
; RV32-NEXT: neg a0, a0
; RV32-NEXT: ret
; RV32I-LABEL: sdiv32_pow2_negative_2:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: srli a1, a0, 31
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srai a0, a0, 1
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: sdiv32_pow2_negative_2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srliw a1, a0, 31
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: sraiw a0, a0, 1
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv32_pow2_negative_2:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srliw a1, a0, 31
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 1
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i32 %a, -2
ret i32 %div
@ -57,14 +53,6 @@ define i32 @sdiv32_pow2_2048(i32 %a) {
; RV32I-NEXT: srai a0, a0, 11
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv32_pow2_2048:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: slti a1, a0, 0
; RV32ZBT-NEXT: addi a2, a0, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: srai a0, a0, 11
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: sdiv32_pow2_2048:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: sraiw a1, a0, 31
@ -72,15 +60,6 @@ define i32 @sdiv32_pow2_2048(i32 %a) {
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 11
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: sdiv32_pow2_2048:
; RV64ZBT: # %bb.0: # %entry
; RV64ZBT-NEXT: sext.w a1, a0
; RV64ZBT-NEXT: addi a2, a0, 2047
; RV64ZBT-NEXT: slti a1, a1, 0
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: sraiw a0, a0, 11
; RV64ZBT-NEXT: ret
entry:
%div = sdiv i32 %a, 2048
ret i32 %div
@ -96,15 +75,6 @@ define i32 @sdiv32_pow2_negative_2048(i32 %a) {
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv32_pow2_negative_2048:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: slti a1, a0, 0
; RV32ZBT-NEXT: addi a2, a0, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: srai a0, a0, 11
; RV32ZBT-NEXT: neg a0, a0
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: sdiv32_pow2_negative_2048:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: sraiw a1, a0, 31
@ -113,104 +83,94 @@ define i32 @sdiv32_pow2_negative_2048(i32 %a) {
; RV64I-NEXT: sraiw a0, a0, 11
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: sdiv32_pow2_negative_2048:
; RV64ZBT: # %bb.0: # %entry
; RV64ZBT-NEXT: sext.w a1, a0
; RV64ZBT-NEXT: addi a2, a0, 2047
; RV64ZBT-NEXT: slti a1, a1, 0
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: sraiw a0, a0, 11
; RV64ZBT-NEXT: neg a0, a0
; RV64ZBT-NEXT: ret
entry:
%div = sdiv i32 %a, -2048
ret i32 %div
}
define i32 @sdiv32_pow2_4096(i32 %a) {
; RV32-LABEL: sdiv32_pow2_4096:
; RV32: # %bb.0: # %entry
; RV32-NEXT: srai a1, a0, 31
; RV32-NEXT: srli a1, a1, 20
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: srai a0, a0, 12
; RV32-NEXT: ret
; RV32I-LABEL: sdiv32_pow2_4096:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srli a1, a1, 20
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srai a0, a0, 12
; RV32I-NEXT: ret
;
; RV64-LABEL: sdiv32_pow2_4096:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sraiw a1, a0, 31
; RV64-NEXT: srliw a1, a1, 20
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: sraiw a0, a0, 12
; RV64-NEXT: ret
; RV64I-LABEL: sdiv32_pow2_4096:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: srliw a1, a1, 20
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 12
; RV64I-NEXT: ret
entry:
%div = sdiv i32 %a, 4096
ret i32 %div
}
define i32 @sdiv32_pow2_negative_4096(i32 %a) {
; RV32-LABEL: sdiv32_pow2_negative_4096:
; RV32: # %bb.0: # %entry
; RV32-NEXT: srai a1, a0, 31
; RV32-NEXT: srli a1, a1, 20
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: srai a0, a0, 12
; RV32-NEXT: neg a0, a0
; RV32-NEXT: ret
; RV32I-LABEL: sdiv32_pow2_negative_4096:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srli a1, a1, 20
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srai a0, a0, 12
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: sdiv32_pow2_negative_4096:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sraiw a1, a0, 31
; RV64-NEXT: srliw a1, a1, 20
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: sraiw a0, a0, 12
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv32_pow2_negative_4096:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: srliw a1, a1, 20
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 12
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i32 %a, -4096
ret i32 %div
}
define i32 @sdiv32_pow2_65536(i32 %a) {
; RV32-LABEL: sdiv32_pow2_65536:
; RV32: # %bb.0: # %entry
; RV32-NEXT: srai a1, a0, 31
; RV32-NEXT: srli a1, a1, 16
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: srai a0, a0, 16
; RV32-NEXT: ret
; RV32I-LABEL: sdiv32_pow2_65536:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: ret
;
; RV64-LABEL: sdiv32_pow2_65536:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sraiw a1, a0, 31
; RV64-NEXT: srliw a1, a1, 16
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: sraiw a0, a0, 16
; RV64-NEXT: ret
; RV64I-LABEL: sdiv32_pow2_65536:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: srliw a1, a1, 16
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 16
; RV64I-NEXT: ret
entry:
%div = sdiv i32 %a, 65536
ret i32 %div
}
define i32 @sdiv32_pow2_negative_65536(i32 %a) {
; RV32-LABEL: sdiv32_pow2_negative_65536:
; RV32: # %bb.0: # %entry
; RV32-NEXT: srai a1, a0, 31
; RV32-NEXT: srli a1, a1, 16
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: srai a0, a0, 16
; RV32-NEXT: neg a0, a0
; RV32-NEXT: ret
; RV32I-LABEL: sdiv32_pow2_negative_65536:
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: srai a1, a0, 31
; RV32I-NEXT: srli a1, a1, 16
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: srai a0, a0, 16
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV64-LABEL: sdiv32_pow2_negative_65536:
; RV64: # %bb.0: # %entry
; RV64-NEXT: sraiw a1, a0, 31
; RV64-NEXT: srliw a1, a1, 16
; RV64-NEXT: addw a0, a0, a1
; RV64-NEXT: sraiw a0, a0, 16
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv32_pow2_negative_65536:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: sraiw a1, a0, 31
; RV64I-NEXT: srliw a1, a1, 16
; RV64I-NEXT: addw a0, a0, a1
; RV64I-NEXT: sraiw a0, a0, 16
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i32 %a, -65536
ret i32 %div
@ -229,22 +189,12 @@ define i64 @sdiv64_pow2_2(i64 %a) {
; RV32I-NEXT: srai a1, a1, 1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_2:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srli a2, a1, 31
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a0, a2, a1, 1
; RV32ZBT-NEXT: srai a1, a1, 1
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srli a1, a0, 63
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 1
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_2:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srli a1, a0, 63
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 1
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, 2
ret i64 %div
@ -267,27 +217,13 @@ define i64 @sdiv64_pow2_negative_2(i64 %a) {
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_negative_2:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srli a2, a1, 31
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a2, a2, a1, 1
; RV32ZBT-NEXT: neg a0, a2
; RV32ZBT-NEXT: snez a2, a2
; RV32ZBT-NEXT: srai a1, a1, 1
; RV32ZBT-NEXT: add a1, a1, a2
; RV32ZBT-NEXT: neg a1, a1
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_negative_2:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srli a1, a0, 63
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 1
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_negative_2:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srli a1, a0, 63
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 1
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, -2
ret i64 %div
@ -307,17 +243,6 @@ define i64 @sdiv64_pow2_2048(i64 %a) {
; RV32I-NEXT: srai a1, a1, 11
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_2048:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srai a2, a1, 31
; RV32ZBT-NEXT: srli a2, a2, 21
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a0, a2, a1, 11
; RV32ZBT-NEXT: srai a1, a1, 11
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: sdiv64_pow2_2048:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
@ -325,14 +250,6 @@ define i64 @sdiv64_pow2_2048(i64 %a) {
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 11
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: sdiv64_pow2_2048:
; RV64ZBT: # %bb.0: # %entry
; RV64ZBT-NEXT: slti a1, a0, 0
; RV64ZBT-NEXT: addi a2, a0, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: srai a0, a0, 11
; RV64ZBT-NEXT: ret
entry:
%div = sdiv i64 %a, 2048
ret i64 %div
@ -356,21 +273,6 @@ define i64 @sdiv64_pow2_negative_2048(i64 %a) {
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_negative_2048:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srai a2, a1, 31
; RV32ZBT-NEXT: srli a2, a2, 21
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a2, a2, a1, 11
; RV32ZBT-NEXT: neg a0, a2
; RV32ZBT-NEXT: snez a2, a2
; RV32ZBT-NEXT: srai a1, a1, 11
; RV32ZBT-NEXT: add a1, a1, a2
; RV32ZBT-NEXT: neg a1, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: sdiv64_pow2_negative_2048:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
@ -379,15 +281,6 @@ define i64 @sdiv64_pow2_negative_2048(i64 %a) {
; RV64I-NEXT: srai a0, a0, 11
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: sdiv64_pow2_negative_2048:
; RV64ZBT: # %bb.0: # %entry
; RV64ZBT-NEXT: slti a1, a0, 0
; RV64ZBT-NEXT: addi a2, a0, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: srai a0, a0, 11
; RV64ZBT-NEXT: neg a0, a0
; RV64ZBT-NEXT: ret
entry:
%div = sdiv i64 %a, -2048
ret i64 %div
@ -407,24 +300,13 @@ define i64 @sdiv64_pow2_4096(i64 %a) {
; RV32I-NEXT: srai a1, a1, 12
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_4096:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srai a2, a1, 31
; RV32ZBT-NEXT: srli a2, a2, 20
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a0, a2, a1, 12
; RV32ZBT-NEXT: srai a1, a1, 12
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_4096:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srai a1, a0, 63
; RV64-NEXT: srli a1, a1, 52
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 12
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_4096:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: srli a1, a1, 52
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 12
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, 4096
ret i64 %div
@ -448,29 +330,14 @@ define i64 @sdiv64_pow2_negative_4096(i64 %a) {
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_negative_4096:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srai a2, a1, 31
; RV32ZBT-NEXT: srli a2, a2, 20
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a2, a2, a1, 12
; RV32ZBT-NEXT: neg a0, a2
; RV32ZBT-NEXT: snez a2, a2
; RV32ZBT-NEXT: srai a1, a1, 12
; RV32ZBT-NEXT: add a1, a1, a2
; RV32ZBT-NEXT: neg a1, a1
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_negative_4096:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srai a1, a0, 63
; RV64-NEXT: srli a1, a1, 52
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 12
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_negative_4096:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: srli a1, a1, 52
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 12
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, -4096
ret i64 %div
@ -490,24 +357,13 @@ define i64 @sdiv64_pow2_65536(i64 %a) {
; RV32I-NEXT: srai a1, a1, 16
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_65536:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srai a2, a1, 31
; RV32ZBT-NEXT: srli a2, a2, 16
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a0, a2, a1, 16
; RV32ZBT-NEXT: srai a1, a1, 16
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_65536:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srai a1, a0, 63
; RV64-NEXT: srli a1, a1, 48
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 16
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_65536:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 16
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, 65536
ret i64 %div
@ -531,29 +387,14 @@ define i64 @sdiv64_pow2_negative_65536(i64 %a) {
; RV32I-NEXT: neg a1, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_negative_65536:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srai a2, a1, 31
; RV32ZBT-NEXT: srli a2, a2, 16
; RV32ZBT-NEXT: add a2, a0, a2
; RV32ZBT-NEXT: sltu a0, a2, a0
; RV32ZBT-NEXT: add a1, a1, a0
; RV32ZBT-NEXT: fsri a2, a2, a1, 16
; RV32ZBT-NEXT: neg a0, a2
; RV32ZBT-NEXT: snez a2, a2
; RV32ZBT-NEXT: srai a1, a1, 16
; RV32ZBT-NEXT: add a1, a1, a2
; RV32ZBT-NEXT: neg a1, a1
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_negative_65536:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srai a1, a0, 63
; RV64-NEXT: srli a1, a1, 48
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 16
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_negative_65536:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: srli a1, a1, 48
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 16
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, -65536
ret i64 %div
@ -572,26 +413,13 @@ define i64 @sdiv64_pow2_8589934592(i64 %a) {
; RV32I-NEXT: srai a1, a1, 31
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_8589934592:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srli a2, a1, 31
; RV32ZBT-NEXT: add a2, a1, a2
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: fsri a1, a1, a1, 31
; RV32ZBT-NEXT: add a1, a0, a1
; RV32ZBT-NEXT: sltu a0, a1, a0
; RV32ZBT-NEXT: add a1, a2, a0
; RV32ZBT-NEXT: srai a0, a1, 1
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_8589934592:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srai a1, a0, 63
; RV64-NEXT: srli a1, a1, 31
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 33
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_8589934592:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: srli a1, a1, 31
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 33
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, 8589934592 ; 2^33
ret i64 %div
@ -614,31 +442,14 @@ define i64 @sdiv64_pow2_negative_8589934592(i64 %a) {
; RV32I-NEXT: neg a0, a0
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: sdiv64_pow2_negative_8589934592:
; RV32ZBT: # %bb.0: # %entry
; RV32ZBT-NEXT: srli a2, a1, 31
; RV32ZBT-NEXT: add a2, a1, a2
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: fsri a1, a1, a1, 31
; RV32ZBT-NEXT: add a1, a0, a1
; RV32ZBT-NEXT: sltu a0, a1, a0
; RV32ZBT-NEXT: add a0, a2, a0
; RV32ZBT-NEXT: srai a1, a0, 31
; RV32ZBT-NEXT: srai a0, a0, 1
; RV32ZBT-NEXT: snez a2, a0
; RV32ZBT-NEXT: add a1, a1, a2
; RV32ZBT-NEXT: neg a1, a1
; RV32ZBT-NEXT: neg a0, a0
; RV32ZBT-NEXT: ret
;
; RV64-LABEL: sdiv64_pow2_negative_8589934592:
; RV64: # %bb.0: # %entry
; RV64-NEXT: srai a1, a0, 63
; RV64-NEXT: srli a1, a1, 31
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: srai a0, a0, 33
; RV64-NEXT: neg a0, a0
; RV64-NEXT: ret
; RV64I-LABEL: sdiv64_pow2_negative_8589934592:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: srai a1, a0, 63
; RV64I-NEXT: srli a1, a1, 31
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: srai a0, a0, 33
; RV64I-NEXT: neg a0, a0
; RV64I-NEXT: ret
entry:
%div = sdiv i64 %a, -8589934592 ; -2^33
ret i64 %div
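
A side effect visible in the deleted RV32ZBT checks above: fsri combined the two halves of a 64-bit shift in a single instruction, e.g. fsri a0, a2, a1, 11 produces the low word of the biased dividend shifted right by 11. A C sketch of that one step (helper name illustrative):

#include <stdint.h>

// Low word of a 64-bit shift right by s (0 < s < 32) on RV32: one fsri
// replaced the usual srli/slli/or three-instruction sequence.
static uint32_t shift64_low_word(uint32_t lo, uint32_t hi, unsigned s) {
  return (lo >> s) | (hi << (32 - s));   // fsri lo', lo, hi, s
}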


@ -3,14 +3,10 @@
; RUN: | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32ZBT
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64ZBB
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64ZBT
declare i8 @llvm.abs.i8(i8, i1 immarg)
declare i16 @llvm.abs.i16(i16, i1 immarg)
@ -34,14 +30,6 @@ define i8 @abs8(i8 %x) {
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: abs8:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slli a1, a0, 24
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: abs8:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 56
@ -56,14 +44,6 @@ define i8 @abs8(i8 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: abs8:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slli a1, a0, 56
; RV64ZBT-NEXT: srai a1, a1, 63
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: sub a0, a0, a1
; RV64ZBT-NEXT: ret
%abs = tail call i8 @llvm.abs.i8(i8 %x, i1 true)
ret i8 %abs
}
@ -84,14 +64,6 @@ define i8 @select_abs8(i8 %x) {
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: select_abs8:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slli a1, a0, 24
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_abs8:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 56
@ -106,14 +78,6 @@ define i8 @select_abs8(i8 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: select_abs8:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slli a1, a0, 56
; RV64ZBT-NEXT: srai a1, a1, 63
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: sub a0, a0, a1
; RV64ZBT-NEXT: ret
%1 = icmp slt i8 %x, 0
%2 = sub nsw i8 0, %x
%3 = select i1 %1, i8 %2, i8 %x
@ -136,14 +100,6 @@ define i16 @abs16(i16 %x) {
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: abs16:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slli a1, a0, 16
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: abs16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 48
@ -158,14 +114,6 @@ define i16 @abs16(i16 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: abs16:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slli a1, a0, 48
; RV64ZBT-NEXT: srai a1, a1, 63
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: sub a0, a0, a1
; RV64ZBT-NEXT: ret
%abs = tail call i16 @llvm.abs.i16(i16 %x, i1 true)
ret i16 %abs
}
@ -186,14 +134,6 @@ define i16 @select_abs16(i16 %x) {
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: select_abs16:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slli a1, a0, 16
; RV32ZBT-NEXT: srai a1, a1, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_abs16:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a1, a0, 48
@ -208,14 +148,6 @@ define i16 @select_abs16(i16 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: select_abs16:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slli a1, a0, 48
; RV64ZBT-NEXT: srai a1, a1, 63
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: sub a0, a0, a1
; RV64ZBT-NEXT: ret
%1 = icmp slt i16 %x, 0
%2 = sub nsw i16 0, %x
%3 = select i1 %1, i16 %2, i16 %x
@ -236,13 +168,6 @@ define i32 @abs32(i32 %x) {
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: abs32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: srai a1, a0, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
@ -256,13 +181,6 @@ define i32 @abs32(i32 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: abs32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sraiw a1, a0, 31
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: subw a0, a0, a1
; RV64ZBT-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
ret i32 %abs
}
@ -281,13 +199,6 @@ define i32 @select_abs32(i32 %x) {
; RV32ZBB-NEXT: max a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: select_abs32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: srai a1, a0, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
@ -301,13 +212,6 @@ define i32 @select_abs32(i32 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: select_abs32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sraiw a1, a0, 31
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: subw a0, a0, a1
; RV64ZBT-NEXT: ret
%1 = icmp slt i32 %x, 0
%2 = sub nsw i32 0, %x
%3 = select i1 %1, i32 %2, i32 %x
@ -337,18 +241,6 @@ define i64 @abs64(i64 %x) {
; RV32ZBB-NEXT: .LBB6_2:
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: abs64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: neg a2, a0
; RV32ZBT-NEXT: slti a3, a1, 0
; RV32ZBT-NEXT: cmov a2, a3, a2, a0
; RV32ZBT-NEXT: snez a0, a0
; RV32ZBT-NEXT: add a0, a1, a0
; RV32ZBT-NEXT: neg a0, a0
; RV32ZBT-NEXT: cmov a1, a3, a0, a1
; RV32ZBT-NEXT: mv a0, a2
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a1, a0, 63
@ -361,13 +253,6 @@ define i64 @abs64(i64 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: abs64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: srai a1, a0, 63
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: sub a0, a0, a1
; RV64ZBT-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
ret i64 %abs
}
@ -395,18 +280,6 @@ define i64 @select_abs64(i64 %x) {
; RV32ZBB-NEXT: .LBB7_2:
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: select_abs64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: neg a2, a0
; RV32ZBT-NEXT: slti a3, a1, 0
; RV32ZBT-NEXT: cmov a2, a3, a2, a0
; RV32ZBT-NEXT: snez a0, a0
; RV32ZBT-NEXT: add a0, a1, a0
; RV32ZBT-NEXT: neg a0, a0
; RV32ZBT-NEXT: cmov a1, a3, a0, a1
; RV32ZBT-NEXT: mv a0, a2
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a1, a0, 63
@ -419,13 +292,6 @@ define i64 @select_abs64(i64 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: max a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: select_abs64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: srai a1, a0, 63
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: sub a0, a0, a1
; RV64ZBT-NEXT: ret
%1 = icmp slt i64 %x, 0
%2 = sub nsw i64 0, %x
%3 = select i1 %1, i64 %2, i64 %x
@ -495,36 +361,6 @@ define i128 @abs128(i128 %x) {
; RV32ZBB-NEXT: sw a4, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: abs128:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: lw a3, 4(a1)
; RV32ZBT-NEXT: lw a4, 12(a1)
; RV32ZBT-NEXT: lw a1, 8(a1)
; RV32ZBT-NEXT: snez a5, a2
; RV32ZBT-NEXT: snez a6, a3
; RV32ZBT-NEXT: cmov a6, a3, a6, a5
; RV32ZBT-NEXT: neg a7, a1
; RV32ZBT-NEXT: sltu t0, a7, a6
; RV32ZBT-NEXT: snez t1, a1
; RV32ZBT-NEXT: add t1, a4, t1
; RV32ZBT-NEXT: add t0, t1, t0
; RV32ZBT-NEXT: neg t0, t0
; RV32ZBT-NEXT: slti t1, a4, 0
; RV32ZBT-NEXT: cmov a4, t1, t0, a4
; RV32ZBT-NEXT: sub a6, a7, a6
; RV32ZBT-NEXT: cmov a1, t1, a6, a1
; RV32ZBT-NEXT: add a5, a3, a5
; RV32ZBT-NEXT: neg a5, a5
; RV32ZBT-NEXT: cmov a3, t1, a5, a3
; RV32ZBT-NEXT: neg a5, a2
; RV32ZBT-NEXT: cmov a2, t1, a5, a2
; RV32ZBT-NEXT: sw a2, 0(a0)
; RV32ZBT-NEXT: sw a1, 8(a0)
; RV32ZBT-NEXT: sw a3, 4(a0)
; RV32ZBT-NEXT: sw a4, 12(a0)
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: abs128:
; RV64I: # %bb.0:
; RV64I-NEXT: bgez a1, .LBB8_2
@ -546,18 +382,6 @@ define i128 @abs128(i128 %x) {
; RV64ZBB-NEXT: neg a1, a1
; RV64ZBB-NEXT: .LBB8_2:
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: abs128:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: neg a2, a0
; RV64ZBT-NEXT: slti a3, a1, 0
; RV64ZBT-NEXT: cmov a2, a3, a2, a0
; RV64ZBT-NEXT: snez a0, a0
; RV64ZBT-NEXT: add a0, a1, a0
; RV64ZBT-NEXT: neg a0, a0
; RV64ZBT-NEXT: cmov a1, a3, a0, a1
; RV64ZBT-NEXT: mv a0, a2
; RV64ZBT-NEXT: ret
%abs = tail call i128 @llvm.abs.i128(i128 %x, i1 true)
ret i128 %abs
}
@ -625,36 +449,6 @@ define i128 @select_abs128(i128 %x) {
; RV32ZBB-NEXT: sw a4, 12(a0)
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: select_abs128:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: lw a3, 4(a1)
; RV32ZBT-NEXT: lw a4, 12(a1)
; RV32ZBT-NEXT: lw a1, 8(a1)
; RV32ZBT-NEXT: snez a5, a2
; RV32ZBT-NEXT: snez a6, a3
; RV32ZBT-NEXT: cmov a6, a3, a6, a5
; RV32ZBT-NEXT: neg a7, a1
; RV32ZBT-NEXT: sltu t0, a7, a6
; RV32ZBT-NEXT: snez t1, a1
; RV32ZBT-NEXT: add t1, a4, t1
; RV32ZBT-NEXT: add t0, t1, t0
; RV32ZBT-NEXT: neg t0, t0
; RV32ZBT-NEXT: slti t1, a4, 0
; RV32ZBT-NEXT: cmov a4, t1, t0, a4
; RV32ZBT-NEXT: sub a6, a7, a6
; RV32ZBT-NEXT: cmov a1, t1, a6, a1
; RV32ZBT-NEXT: add a5, a3, a5
; RV32ZBT-NEXT: neg a5, a5
; RV32ZBT-NEXT: cmov a3, t1, a5, a3
; RV32ZBT-NEXT: neg a5, a2
; RV32ZBT-NEXT: cmov a2, t1, a5, a2
; RV32ZBT-NEXT: sw a2, 0(a0)
; RV32ZBT-NEXT: sw a1, 8(a0)
; RV32ZBT-NEXT: sw a3, 4(a0)
; RV32ZBT-NEXT: sw a4, 12(a0)
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_abs128:
; RV64I: # %bb.0:
; RV64I-NEXT: bgez a1, .LBB9_2
@ -676,18 +470,6 @@ define i128 @select_abs128(i128 %x) {
; RV64ZBB-NEXT: neg a1, a1
; RV64ZBB-NEXT: .LBB9_2:
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: select_abs128:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: neg a2, a0
; RV64ZBT-NEXT: slti a3, a1, 0
; RV64ZBT-NEXT: cmov a2, a3, a2, a0
; RV64ZBT-NEXT: snez a0, a0
; RV64ZBT-NEXT: add a0, a1, a0
; RV64ZBT-NEXT: neg a0, a0
; RV64ZBT-NEXT: cmov a1, a3, a0, a1
; RV64ZBT-NEXT: mv a0, a2
; RV64ZBT-NEXT: ret
%1 = icmp slt i128 %x, 0
%2 = sub nsw i128 0, %x
%3 = select i1 %1, i128 %2, i128 %x
@ -710,14 +492,6 @@ define i64 @zext_abs32(i32 %x) {
; RV32ZBB-NEXT: li a1, 0
; RV32ZBB-NEXT: ret
;
; RV32ZBT-LABEL: zext_abs32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: srai a1, a0, 31
; RV32ZBT-NEXT: xor a0, a0, a1
; RV32ZBT-NEXT: sub a0, a0, a1
; RV32ZBT-NEXT: li a1, 0
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: zext_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
@ -731,13 +505,6 @@ define i64 @zext_abs32(i32 %x) {
; RV64ZBB-NEXT: negw a0, a0
; RV64ZBB-NEXT: max a0, a1, a0
; RV64ZBB-NEXT: ret
;
; RV64ZBT-LABEL: zext_abs32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sraiw a1, a0, 31
; RV64ZBT-NEXT: xor a0, a0, a1
; RV64ZBT-NEXT: subw a0, a0, a1
; RV64ZBT-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
%zext = zext i32 %abs to i64
ret i64 %zext

View File

@ -3,14 +3,10 @@
; RUN: | FileCheck %s --check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32ZBB
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV32IBT
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+zbb -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64ZBB
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s --check-prefix=RV64IBT
declare i32 @llvm.abs.i32(i32, i1 immarg)
declare i64 @llvm.abs.i64(i64, i1 immarg)
@ -29,13 +25,6 @@ define i32 @neg_abs32(i32 %x) {
; RV32ZBB-NEXT: min a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32IBT-LABEL: neg_abs32:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: srai a1, a0, 31
; RV32IBT-NEXT: xor a0, a0, a1
; RV32IBT-NEXT: sub a0, a1, a0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: neg_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
@ -49,13 +38,6 @@ define i32 @neg_abs32(i32 %x) {
; RV64ZBB-NEXT: xor a0, a0, a1
; RV64ZBB-NEXT: subw a0, a1, a0
; RV64ZBB-NEXT: ret
;
; RV64IBT-LABEL: neg_abs32:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: sraiw a1, a0, 31
; RV64IBT-NEXT: xor a0, a0, a1
; RV64IBT-NEXT: subw a0, a1, a0
; RV64IBT-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
%neg = sub nsw i32 0, %abs
ret i32 %neg
@ -75,13 +57,6 @@ define i32 @select_neg_abs32(i32 %x) {
; RV32ZBB-NEXT: min a0, a0, a1
; RV32ZBB-NEXT: ret
;
; RV32IBT-LABEL: select_neg_abs32:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: srai a1, a0, 31
; RV32IBT-NEXT: xor a0, a0, a1
; RV32IBT-NEXT: sub a0, a1, a0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: select_neg_abs32:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a1, a0, 31
@ -95,13 +70,6 @@ define i32 @select_neg_abs32(i32 %x) {
; RV64ZBB-NEXT: xor a0, a0, a1
; RV64ZBB-NEXT: subw a0, a1, a0
; RV64ZBB-NEXT: ret
;
; RV64IBT-LABEL: select_neg_abs32:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: sraiw a1, a0, 31
; RV64IBT-NEXT: xor a0, a0, a1
; RV64IBT-NEXT: subw a0, a1, a0
; RV64IBT-NEXT: ret
%1 = icmp slt i32 %x, 0
%2 = sub nsw i32 0, %x
%3 = select i1 %1, i32 %x, i32 %2
@ -131,17 +99,6 @@ define i64 @neg_abs64(i64 %x) {
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
;
; RV32IBT-LABEL: neg_abs64:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: srai a2, a1, 31
; RV32IBT-NEXT: xor a0, a0, a2
; RV32IBT-NEXT: sltu a3, a2, a0
; RV32IBT-NEXT: xor a1, a1, a2
; RV32IBT-NEXT: sub a1, a2, a1
; RV32IBT-NEXT: sub a1, a1, a3
; RV32IBT-NEXT: sub a0, a2, a0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: neg_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a1, a0, 63
@ -154,13 +111,6 @@ define i64 @neg_abs64(i64 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64IBT-LABEL: neg_abs64:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: srai a1, a0, 63
; RV64IBT-NEXT: xor a0, a0, a1
; RV64IBT-NEXT: sub a0, a1, a0
; RV64IBT-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
%neg = sub nsw i64 0, %abs
ret i64 %neg
@ -189,17 +139,6 @@ define i64 @select_neg_abs64(i64 %x) {
; RV32ZBB-NEXT: sub a0, a2, a0
; RV32ZBB-NEXT: ret
;
; RV32IBT-LABEL: select_neg_abs64:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: srai a2, a1, 31
; RV32IBT-NEXT: xor a0, a0, a2
; RV32IBT-NEXT: sltu a3, a2, a0
; RV32IBT-NEXT: xor a1, a1, a2
; RV32IBT-NEXT: sub a1, a2, a1
; RV32IBT-NEXT: sub a1, a1, a3
; RV32IBT-NEXT: sub a0, a2, a0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: select_neg_abs64:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a1, a0, 63
@ -212,13 +151,6 @@ define i64 @select_neg_abs64(i64 %x) {
; RV64ZBB-NEXT: neg a1, a0
; RV64ZBB-NEXT: min a0, a0, a1
; RV64ZBB-NEXT: ret
;
; RV64IBT-LABEL: select_neg_abs64:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: srai a1, a0, 63
; RV64IBT-NEXT: xor a0, a0, a1
; RV64IBT-NEXT: sub a0, a1, a0
; RV64IBT-NEXT: ret
%1 = icmp slt i64 %x, 0
%2 = sub nsw i64 0, %x
%3 = select i1 %1, i64 %x, i64 %2
@ -243,15 +175,6 @@ define i32 @neg_abs32_multiuse(i32 %x, i32* %y) {
; RV32ZBB-NEXT: sw a2, 0(a1)
; RV32ZBB-NEXT: ret
;
; RV32IBT-LABEL: neg_abs32_multiuse:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: srai a2, a0, 31
; RV32IBT-NEXT: xor a0, a0, a2
; RV32IBT-NEXT: sub a2, a0, a2
; RV32IBT-NEXT: neg a0, a2
; RV32IBT-NEXT: sw a2, 0(a1)
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: neg_abs32_multiuse:
; RV64I: # %bb.0:
; RV64I-NEXT: sraiw a2, a0, 31
@ -269,15 +192,6 @@ define i32 @neg_abs32_multiuse(i32 %x, i32* %y) {
; RV64ZBB-NEXT: negw a0, a2
; RV64ZBB-NEXT: sw a2, 0(a1)
; RV64ZBB-NEXT: ret
;
; RV64IBT-LABEL: neg_abs32_multiuse:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: sraiw a2, a0, 31
; RV64IBT-NEXT: xor a0, a0, a2
; RV64IBT-NEXT: subw a2, a0, a2
; RV64IBT-NEXT: negw a0, a2
; RV64IBT-NEXT: sw a2, 0(a1)
; RV64IBT-NEXT: ret
%abs = tail call i32 @llvm.abs.i32(i32 %x, i1 true)
store i32 %abs, i32* %y
%neg = sub nsw i32 0, %abs
@ -321,23 +235,6 @@ define i64 @neg_abs64_multiuse(i64 %x, i64* %y) {
; RV32ZBB-NEXT: mv a1, a3
; RV32ZBB-NEXT: ret
;
; RV32IBT-LABEL: neg_abs64_multiuse:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: snez a3, a0
; RV32IBT-NEXT: add a3, a1, a3
; RV32IBT-NEXT: neg a3, a3
; RV32IBT-NEXT: slti a4, a1, 0
; RV32IBT-NEXT: cmov a3, a4, a3, a1
; RV32IBT-NEXT: neg a1, a0
; RV32IBT-NEXT: cmov a0, a4, a1, a0
; RV32IBT-NEXT: sw a0, 0(a2)
; RV32IBT-NEXT: snez a1, a0
; RV32IBT-NEXT: add a1, a3, a1
; RV32IBT-NEXT: neg a1, a1
; RV32IBT-NEXT: neg a0, a0
; RV32IBT-NEXT: sw a3, 4(a2)
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: neg_abs64_multiuse:
; RV64I: # %bb.0:
; RV64I-NEXT: srai a2, a0, 63
@ -354,15 +251,6 @@ define i64 @neg_abs64_multiuse(i64 %x, i64* %y) {
; RV64ZBB-NEXT: neg a0, a2
; RV64ZBB-NEXT: sd a2, 0(a1)
; RV64ZBB-NEXT: ret
;
; RV64IBT-LABEL: neg_abs64_multiuse:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: srai a2, a0, 63
; RV64IBT-NEXT: xor a0, a0, a2
; RV64IBT-NEXT: sub a2, a0, a2
; RV64IBT-NEXT: neg a0, a2
; RV64IBT-NEXT: sd a2, 0(a1)
; RV64IBT-NEXT: ret
%abs = tail call i64 @llvm.abs.i64(i64 %x, i1 true)
store i64 %abs, i64* %y
%neg = sub nsw i64 0, %abs

View File

@ -1,65 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32ZBT
declare i32 @llvm.riscv.fsl.i32(i32, i32, i32)
define i32 @fsl_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32ZBT-LABEL: fsl_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsl a0, a0, a1, a2
; RV32ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
define i32 @fsl_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
; RV32ZBT-LABEL: fsl_i32_demandedbits:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: andi a1, a1, 31
; RV32ZBT-NEXT: fsl a0, a0, a1, a2
; RV32ZBT-NEXT: ret
%bmask = and i32 %b, 95
%1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %bmask, i32 %c)
ret i32 %1
}
declare i32 @llvm.riscv.fsr.i32(i32, i32, i32)
define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32ZBT-LABEL: fsr_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsr a0, a0, a1, a2
; RV32ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
define i32 @fsr_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
; RV32ZBT-LABEL: fsr_i32_demandedbits:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: andi a1, a1, 31
; RV32ZBT-NEXT: fsr a0, a0, a1, a2
; RV32ZBT-NEXT: ret
%bmask = and i32 %b, 95
%1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %bmask, i32 %c)
ret i32 %1
}
define i32 @fsli_i32(i32 %a, i32 %b) nounwind {
; RV32ZBT-LABEL: fsli_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsri a0, a1, a0, 27
; RV32ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 5)
ret i32 %1
}
define i32 @fsri_i32(i32 %a, i32 %b) nounwind {
; RV32ZBT-LABEL: fsri_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsri a0, a0, a1, 15
; RV32ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 15)
ret i32 %1
}

View File

@ -1,984 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32ZBT
define i32 @cmix_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmix_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: and a0, a1, a0
; RV32I-NEXT: not a1, a1
; RV32I-NEXT: and a1, a1, a2
; RV32I-NEXT: or a0, a1, a0
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmix_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: cmix a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%and = and i32 %b, %a
%neg = xor i32 %b, -1
%and1 = and i32 %neg, %c
%or = or i32 %and1, %and
ret i32 %or
}
define i32 @cmix_i32_2(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmix_i32_2:
; RV32I: # %bb.0:
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: and a0, a0, a1
; RV32I-NEXT: xor a0, a0, a2
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmix_i32_2:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: cmix a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%xor = xor i32 %a, %c
%and = and i32 %xor, %b
%xor1 = xor i32 %and, %c
ret i32 %xor1
}
define i64 @cmix_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmix_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: and a1, a3, a1
; RV32I-NEXT: and a0, a2, a0
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: not a3, a3
; RV32I-NEXT: and a3, a3, a5
; RV32I-NEXT: and a2, a2, a4
; RV32I-NEXT: or a0, a2, a0
; RV32I-NEXT: or a1, a3, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmix_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: cmix a0, a2, a0, a4
; RV32ZBT-NEXT: cmix a1, a3, a1, a5
; RV32ZBT-NEXT: ret
%and = and i64 %b, %a
%neg = xor i64 %b, -1
%and1 = and i64 %neg, %c
%or = or i64 %and1, %and
ret i64 %or
}
define i64 @cmix_i64_2(i64 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmix_i64_2:
; RV32I: # %bb.0:
; RV32I-NEXT: xor a0, a0, a4
; RV32I-NEXT: xor a1, a1, a5
; RV32I-NEXT: and a1, a1, a3
; RV32I-NEXT: and a0, a0, a2
; RV32I-NEXT: xor a0, a0, a4
; RV32I-NEXT: xor a1, a1, a5
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmix_i64_2:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: cmix a0, a2, a0, a4
; RV32ZBT-NEXT: cmix a1, a3, a1, a5
; RV32ZBT-NEXT: ret
%xor = xor i64 %a, %c
%and = and i64 %xor, %b
%xor1 = xor i64 %and, %c
ret i64 %xor1
}
define i32 @cmov_eq_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_eq_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a1, a2, .LBB4_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB4_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_eq_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: xor a1, a1, a2
; RV32ZBT-NEXT: cmov a0, a1, a3, a0
; RV32ZBT-NEXT: ret
%tobool.not = icmp eq i32 %b, %c
%cond = select i1 %tobool.not, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_eq_i32_constant_zero(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_eq_i32_constant_zero:
; RV32I: # %bb.0:
; RV32I-NEXT: beqz a1, .LBB5_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB5_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_eq_i32_constant_zero:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool.not = icmp eq i32 %b, 0
%cond = select i1 %tobool.not, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_eq_i32_constant_2048(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_eq_i32_constant_2048:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1
; RV32I-NEXT: addi a3, a3, -2048
; RV32I-NEXT: beq a1, a3, .LBB6_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB6_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_eq_i32_constant_2048:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: addi a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool.not = icmp eq i32 %b, 2048
%cond = select i1 %tobool.not, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_eq_i32_constant_neg_2047(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_eq_i32_constant_neg_2047:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, -2047
; RV32I-NEXT: beq a1, a3, .LBB7_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB7_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_eq_i32_constant_neg_2047:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: addi a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool.not = icmp eq i32 %b, -2047
%cond = select i1 %tobool.not, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ne_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_ne_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bne a1, a2, .LBB8_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB8_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ne_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: xor a1, a1, a2
; RV32ZBT-NEXT: cmov a0, a1, a0, a3
; RV32ZBT-NEXT: ret
%tobool.not = icmp ne i32 %b, %c
%cond = select i1 %tobool.not, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_ne_i32_constant_zero(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ne_i32_constant_zero:
; RV32I: # %bb.0:
; RV32I-NEXT: bnez a1, .LBB9_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB9_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ne_i32_constant_zero:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: cmov a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%tobool.not = icmp ne i32 %b, 0
%cond = select i1 %tobool.not, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ne_i32_constant_2048(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ne_i32_constant_2048:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1
; RV32I-NEXT: addi a3, a3, -2048
; RV32I-NEXT: bne a1, a3, .LBB10_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB10_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ne_i32_constant_2048:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: addi a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%tobool.not = icmp ne i32 %b, 2048
%cond = select i1 %tobool.not, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ne_i32_constant_neg_2047(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ne_i32_constant_neg_2047:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, -2047
; RV32I-NEXT: bne a1, a3, .LBB11_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB11_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ne_i32_constant_neg_2047:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: addi a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%tobool.not = icmp ne i32 %b, -2047
%cond = select i1 %tobool.not, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_sle_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_sle_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bge a2, a1, .LBB12_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB12_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sle_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slt a1, a2, a1
; RV32ZBT-NEXT: cmov a0, a1, a3, a0
; RV32ZBT-NEXT: ret
%tobool = icmp sle i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_sle_i32_constant_2046(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_sle_i32_constant_2046:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, 2047
; RV32I-NEXT: blt a1, a3, .LBB13_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB13_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sle_i32_constant_2046:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%tobool = icmp sle i32 %b, 2046
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_sle_i32_constant_neg_2049(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_sle_i32_constant_neg_2049:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, -2048
; RV32I-NEXT: blt a1, a3, .LBB14_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB14_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sle_i32_constant_neg_2049:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%tobool = icmp sle i32 %b, -2049
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_sgt_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_sgt_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: blt a2, a1, .LBB15_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB15_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sgt_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slt a1, a2, a1
; RV32ZBT-NEXT: cmov a0, a1, a0, a3
; RV32ZBT-NEXT: ret
%tobool = icmp sgt i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_sgt_i32_constant_2046(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_sgt_i32_constant_2046:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, 2046
; RV32I-NEXT: blt a3, a1, .LBB16_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB16_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sgt_i32_constant_2046:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp sgt i32 %b, 2046
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_sgt_i32_constant_neg_2049(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_sgt_i32_constant_neg_2049:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1048575
; RV32I-NEXT: addi a3, a3, 2047
; RV32I-NEXT: blt a3, a1, .LBB17_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB17_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sgt_i32_constant_neg_2049:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp sgt i32 %b, -2049
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_sge_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_sge_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bge a1, a2, .LBB18_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB18_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sge_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slt a1, a1, a2
; RV32ZBT-NEXT: cmov a0, a1, a3, a0
; RV32ZBT-NEXT: ret
%tobool = icmp sge i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_sge_i32_constant_2047(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_sge_i32_constant_2047:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, 2046
; RV32I-NEXT: blt a3, a1, .LBB19_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB19_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sge_i32_constant_2047:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp sge i32 %b, 2047
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_sge_i32_constant_neg_2048(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_sge_i32_constant_neg_2048:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1048575
; RV32I-NEXT: addi a3, a3, 2047
; RV32I-NEXT: blt a3, a1, .LBB20_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB20_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sge_i32_constant_neg_2048:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp sge i32 %b, -2048
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ule_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_ule_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bgeu a2, a1, .LBB21_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB21_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ule_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a1, a2, a1
; RV32ZBT-NEXT: cmov a0, a1, a3, a0
; RV32ZBT-NEXT: ret
%tobool = icmp ule i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_ule_i32_constant_2047(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ule_i32_constant_2047:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a1, a1, 11
; RV32I-NEXT: beqz a1, .LBB22_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB22_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ule_i32_constant_2047:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: srli a1, a1, 11
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp ule i32 %b, 2047
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ule_i32_constant_neg_2049(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ule_i32_constant_neg_2049:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, -2048
; RV32I-NEXT: bltu a1, a3, .LBB23_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB23_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ule_i32_constant_neg_2049:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltiu a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%tobool = icmp ule i32 %b, 4294965247
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ugt_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_ugt_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bltu a2, a1, .LBB24_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB24_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ugt_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a1, a2, a1
; RV32ZBT-NEXT: cmov a0, a1, a0, a3
; RV32ZBT-NEXT: ret
%tobool = icmp ugt i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_ugt_i32_constant_2046(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ugt_i32_constant_2046:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, 2046
; RV32I-NEXT: bltu a3, a1, .LBB25_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB25_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ugt_i32_constant_2046:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltiu a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp ugt i32 %b, 2046
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_ugt_i32_constant_neg_2049(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_ugt_i32_constant_neg_2049:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1048575
; RV32I-NEXT: addi a3, a3, 2047
; RV32I-NEXT: bltu a3, a1, .LBB26_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB26_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ugt_i32_constant_neg_2049:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltiu a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp ugt i32 %b, 4294965247
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_uge_i32(i32 %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-LABEL: cmov_uge_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: bgeu a1, a2, .LBB27_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a3
; RV32I-NEXT: .LBB27_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_uge_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a1, a1, a2
; RV32ZBT-NEXT: cmov a0, a1, a3, a0
; RV32ZBT-NEXT: ret
%tobool = icmp uge i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i32 @cmov_uge_i32_constant_2047(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_uge_i32_constant_2047:
; RV32I: # %bb.0:
; RV32I-NEXT: li a3, 2046
; RV32I-NEXT: bltu a3, a1, .LBB28_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB28_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_uge_i32_constant_2047:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltiu a1, a1, 2047
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp uge i32 %b, 2047
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i32 @cmov_uge_i32_constant_neg_2048(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: cmov_uge_i32_constant_neg_2048:
; RV32I: # %bb.0:
; RV32I-NEXT: lui a3, 1048575
; RV32I-NEXT: addi a3, a3, 2047
; RV32I-NEXT: bltu a3, a1, .LBB29_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB29_2:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_uge_i32_constant_neg_2048:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltiu a1, a1, -2048
; RV32ZBT-NEXT: cmov a0, a1, a2, a0
; RV32ZBT-NEXT: ret
%tobool = icmp uge i32 %b, 4294965248
%cond = select i1 %tobool, i32 %a, i32 %c
ret i32 %cond
}
define i64 @cmov_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: cmov_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: or a2, a2, a3
; RV32I-NEXT: beqz a2, .LBB30_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a4, a0
; RV32I-NEXT: mv a5, a1
; RV32I-NEXT: .LBB30_2:
; RV32I-NEXT: mv a0, a4
; RV32I-NEXT: mv a1, a5
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: or a2, a2, a3
; RV32ZBT-NEXT: cmov a0, a2, a0, a4
; RV32ZBT-NEXT: cmov a1, a2, a1, a5
; RV32ZBT-NEXT: ret
%tobool.not = icmp eq i64 %b, 0
%cond = select i1 %tobool.not, i64 %c, i64 %a
ret i64 %cond
}
define i64 @cmov_sle_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV32I-LABEL: cmov_sle_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a3, a5, .LBB31_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a2, a5, a3
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB31_3
; RV32I-NEXT: j .LBB31_4
; RV32I-NEXT: .LBB31_2:
; RV32I-NEXT: sltu a2, a4, a2
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: bnez a2, .LBB31_4
; RV32I-NEXT: .LBB31_3:
; RV32I-NEXT: mv a0, a6
; RV32I-NEXT: mv a1, a7
; RV32I-NEXT: .LBB31_4:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sle_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a2, a4, a2
; RV32ZBT-NEXT: xor a4, a3, a5
; RV32ZBT-NEXT: slt a3, a5, a3
; RV32ZBT-NEXT: cmov a2, a4, a3, a2
; RV32ZBT-NEXT: cmov a0, a2, a6, a0
; RV32ZBT-NEXT: cmov a1, a2, a7, a1
; RV32ZBT-NEXT: ret
%tobool = icmp sle i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_sge_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV32I-LABEL: cmov_sge_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a3, a5, .LBB32_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: slt a2, a3, a5
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB32_3
; RV32I-NEXT: j .LBB32_4
; RV32I-NEXT: .LBB32_2:
; RV32I-NEXT: sltu a2, a2, a4
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: bnez a2, .LBB32_4
; RV32I-NEXT: .LBB32_3:
; RV32I-NEXT: mv a0, a6
; RV32I-NEXT: mv a1, a7
; RV32I-NEXT: .LBB32_4:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_sge_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a2, a2, a4
; RV32ZBT-NEXT: xor a4, a3, a5
; RV32ZBT-NEXT: slt a3, a3, a5
; RV32ZBT-NEXT: cmov a2, a4, a3, a2
; RV32ZBT-NEXT: cmov a0, a2, a6, a0
; RV32ZBT-NEXT: cmov a1, a2, a7, a1
; RV32ZBT-NEXT: ret
%tobool = icmp sge i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_ule_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV32I-LABEL: cmov_ule_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a3, a5, .LBB33_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a2, a5, a3
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB33_3
; RV32I-NEXT: j .LBB33_4
; RV32I-NEXT: .LBB33_2:
; RV32I-NEXT: sltu a2, a4, a2
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: bnez a2, .LBB33_4
; RV32I-NEXT: .LBB33_3:
; RV32I-NEXT: mv a0, a6
; RV32I-NEXT: mv a1, a7
; RV32I-NEXT: .LBB33_4:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_ule_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a2, a4, a2
; RV32ZBT-NEXT: xor a4, a3, a5
; RV32ZBT-NEXT: sltu a3, a5, a3
; RV32ZBT-NEXT: cmov a2, a4, a3, a2
; RV32ZBT-NEXT: cmov a0, a2, a6, a0
; RV32ZBT-NEXT: cmov a1, a2, a7, a1
; RV32ZBT-NEXT: ret
%tobool = icmp ule i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_uge_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV32I-LABEL: cmov_uge_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: beq a3, a5, .LBB34_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: sltu a2, a3, a5
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: beqz a2, .LBB34_3
; RV32I-NEXT: j .LBB34_4
; RV32I-NEXT: .LBB34_2:
; RV32I-NEXT: sltu a2, a2, a4
; RV32I-NEXT: xori a2, a2, 1
; RV32I-NEXT: bnez a2, .LBB34_4
; RV32I-NEXT: .LBB34_3:
; RV32I-NEXT: mv a0, a6
; RV32I-NEXT: mv a1, a7
; RV32I-NEXT: .LBB34_4:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: cmov_uge_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: sltu a2, a2, a4
; RV32ZBT-NEXT: xor a4, a3, a5
; RV32ZBT-NEXT: sltu a3, a3, a5
; RV32ZBT-NEXT: cmov a2, a4, a3, a2
; RV32ZBT-NEXT: cmov a0, a2, a6, a0
; RV32ZBT-NEXT: cmov a1, a2, a7, a1
; RV32ZBT-NEXT: ret
%tobool = icmp uge i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
declare i32 @llvm.fshl.i32(i32, i32, i32)
define i32 @fshl_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: fshl_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: sll a0, a0, a2
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: srli a1, a1, 1
; RV32I-NEXT: srl a1, a1, a2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshl_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: andi a2, a2, 31
; RV32ZBT-NEXT: fsl a0, a0, a1, a2
; RV32ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
; Because we do not directly match i64 code patterns on RV32, some i64
; patterns do not yet have an efficient lowering with the bit-manipulation
; instructions on RV32.
; This test is kept here in case future expansions of the Bitmanip
; extensions introduce instructions that match this pattern more efficiently.
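; Editorial sketch (not part of the autogenerated checks): llvm.fshl
; concatenates its first two operands, with %a as the high half, shifts the
; 128-bit value left by the third operand modulo 64, and returns the top
; 64 bits; for a shift amount of 5 that is (a << 5) | (b >> 59). The
; hypothetical helper below spells out that expansion in plain IR.
define i64 @fshl_i64_expanded(i64 %a, i64 %b) {
  %hi = shl i64 %a, 5
  %lo = lshr i64 %b, 59
  %r = or i64 %hi, %lo
  ret i64 %r
}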
declare i64 @llvm.fshl.i64(i64, i64, i64)
define i64 @fshl_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: fshl_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: slli a5, a4, 26
; RV32I-NEXT: srli a6, a5, 31
; RV32I-NEXT: mv a5, a3
; RV32I-NEXT: bnez a6, .LBB36_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a5, a0
; RV32I-NEXT: .LBB36_2:
; RV32I-NEXT: sll a7, a5, a4
; RV32I-NEXT: bnez a6, .LBB36_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: .LBB36_4:
; RV32I-NEXT: srli a2, a2, 1
; RV32I-NEXT: not a3, a4
; RV32I-NEXT: srl a2, a2, a3
; RV32I-NEXT: or a2, a7, a2
; RV32I-NEXT: bnez a6, .LBB36_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB36_6:
; RV32I-NEXT: sll a0, a0, a4
; RV32I-NEXT: srli a1, a5, 1
; RV32I-NEXT: srl a1, a1, a3
; RV32I-NEXT: or a1, a0, a1
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshl_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slli a5, a4, 26
; RV32ZBT-NEXT: srli a5, a5, 31
; RV32ZBT-NEXT: cmov a2, a5, a2, a3
; RV32ZBT-NEXT: cmov a3, a5, a3, a0
; RV32ZBT-NEXT: andi a4, a4, 31
; RV32ZBT-NEXT: fsl a2, a3, a2, a4
; RV32ZBT-NEXT: cmov a0, a5, a0, a1
; RV32ZBT-NEXT: fsl a1, a0, a3, a4
; RV32ZBT-NEXT: mv a0, a2
; RV32ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
ret i64 %1
}
declare i32 @llvm.fshr.i32(i32, i32, i32)
define i32 @fshr_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: fshr_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: srl a1, a1, a2
; RV32I-NEXT: not a2, a2
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: sll a0, a0, a2
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshr_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: andi a2, a2, 31
; RV32ZBT-NEXT: fsr a0, a1, a0, a2
; RV32ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
; Because we do not directly match i64 code patterns on RV32, some i64
; patterns do not yet have an efficient lowering with the bit-manipulation
; instructions on RV32.
; This test is kept here in case future expansions of the Bitmanip
; extensions introduce instructions that match this pattern more efficiently.
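; Editorial sketch (not part of the autogenerated checks): llvm.fshr
; shifts the same 128-bit concatenation right and returns the bottom
; 64 bits; for a shift amount of 5 that is (a << 59) | (b >> 5). The
; hypothetical helper below spells out that expansion in plain IR.
define i64 @fshr_i64_expanded(i64 %a, i64 %b) {
  %hi = shl i64 %a, 59
  %lo = lshr i64 %b, 5
  %r = or i64 %hi, %lo
  ret i64 %r
}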
declare i64 @llvm.fshr.i64(i64, i64, i64)
define i64 @fshr_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV32I-LABEL: fshr_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: andi a5, a4, 32
; RV32I-NEXT: beqz a5, .LBB38_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: mv a2, a3
; RV32I-NEXT: .LBB38_2:
; RV32I-NEXT: srl a2, a2, a4
; RV32I-NEXT: beqz a5, .LBB38_4
; RV32I-NEXT: # %bb.3:
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: .LBB38_4:
; RV32I-NEXT: slli a7, a3, 1
; RV32I-NEXT: not a6, a4
; RV32I-NEXT: sll a7, a7, a6
; RV32I-NEXT: or a2, a7, a2
; RV32I-NEXT: srl a3, a3, a4
; RV32I-NEXT: beqz a5, .LBB38_6
; RV32I-NEXT: # %bb.5:
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: .LBB38_6:
; RV32I-NEXT: slli a0, a0, 1
; RV32I-NEXT: sll a0, a0, a6
; RV32I-NEXT: or a1, a0, a3
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshr_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: andi a5, a4, 32
; RV32ZBT-NEXT: cmov a6, a5, a0, a3
; RV32ZBT-NEXT: cmov a2, a5, a3, a2
; RV32ZBT-NEXT: andi a3, a4, 31
; RV32ZBT-NEXT: fsr a2, a2, a6, a3
; RV32ZBT-NEXT: cmov a0, a5, a1, a0
; RV32ZBT-NEXT: fsr a1, a6, a0, a3
; RV32ZBT-NEXT: mv a0, a2
; RV32ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
ret i64 %1
}
define i32 @fshri_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: fshri_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a1, a1, 5
; RV32I-NEXT: slli a0, a0, 27
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshri_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsri a0, a1, a0, 5
; RV32ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
ret i32 %1
}
define i64 @fshri_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: fshri_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a1, a2, 5
; RV32I-NEXT: slli a2, a3, 27
; RV32I-NEXT: or a2, a2, a1
; RV32I-NEXT: srli a1, a3, 5
; RV32I-NEXT: slli a0, a0, 27
; RV32I-NEXT: or a1, a0, a1
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshri_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsri a2, a2, a3, 5
; RV32ZBT-NEXT: fsri a1, a3, a0, 5
; RV32ZBT-NEXT: mv a0, a2
; RV32ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
ret i64 %1
}
define i32 @fshli_i32(i32 %a, i32 %b) nounwind {
; RV32I-LABEL: fshli_i32:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a1, a1, 27
; RV32I-NEXT: slli a0, a0, 5
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshli_i32:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsri a0, a1, a0, 27
; RV32ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
ret i32 %1
}
define i64 @fshli_i64(i64 %a, i64 %b) nounwind {
; RV32I-LABEL: fshli_i64:
; RV32I: # %bb.0:
; RV32I-NEXT: srli a2, a3, 27
; RV32I-NEXT: slli a3, a0, 5
; RV32I-NEXT: or a2, a3, a2
; RV32I-NEXT: srli a0, a0, 27
; RV32I-NEXT: slli a1, a1, 5
; RV32I-NEXT: or a1, a1, a0
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: fshli_i64:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: fsri a2, a3, a0, 27
; RV32ZBT-NEXT: fsri a1, a0, a1, 27
; RV32ZBT-NEXT: mv a0, a2
; RV32ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)
ret i64 %1
}

View File

@ -1,127 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64ZBT
declare i32 @llvm.riscv.fsl.i32(i32, i32, i32)
define i32 @fsl_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV64ZBT-LABEL: fsl_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fslw a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
define i32 @fsl_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
; RV64ZBT-LABEL: fsl_i32_demandedbits:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a1, a1, 31
; RV64ZBT-NEXT: fslw a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%bmask = and i32 %b, 95
%1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %bmask, i32 %c)
ret i32 %1
}
declare i32 @llvm.riscv.fsr.i32(i32, i32, i32)
define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV64ZBT-LABEL: fsr_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsrw a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
define i32 @fsr_i32_demandedbits(i32 %a, i32 %b, i32 %c) nounwind {
; RV64ZBT-LABEL: fsr_i32_demandedbits:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a1, a1, 31
; RV64ZBT-NEXT: fsrw a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%bmask = and i32 %b, 95
%1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %bmask, i32 %c)
ret i32 %1
}
define i32 @fsli_i32(i32 %a, i32 %b) nounwind {
; RV64ZBT-LABEL: fsli_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsriw a0, a1, a0, 27
; RV64ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 5)
ret i32 %1
}
define i32 @fsri_i32(i32 %a, i32 %b) nounwind {
; RV64ZBT-LABEL: fsri_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsriw a0, a0, a1, 15
; RV64ZBT-NEXT: ret
%1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 15)
ret i32 %1
}
declare i64 @llvm.riscv.fsl.i64(i64, i64, i64)
define i64 @fsl_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64ZBT-LABEL: fsl_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsl a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%1 = call i64 @llvm.riscv.fsl.i64(i64 %a, i64 %b, i64 %c)
ret i64 %1
}
define i64 @fsl_i64_demandedbits(i64 %a, i64 %b, i64 %c) nounwind {
; RV64ZBT-LABEL: fsl_i64_demandedbits:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a1, a1, 63
; RV64ZBT-NEXT: fsl a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%bmask = and i64 %b, 191
%1 = call i64 @llvm.riscv.fsl.i64(i64 %a, i64 %bmask, i64 %c)
ret i64 %1
}
declare i64 @llvm.riscv.fsr.i64(i64, i64, i64)
define i64 @fsr_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64ZBT-LABEL: fsr_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsr a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%1 = call i64 @llvm.riscv.fsr.i64(i64 %a, i64 %b, i64 %c)
ret i64 %1
}
define i64 @fsr_i64_demandedbits(i64 %a, i64 %b, i64 %c) nounwind {
; RV64ZBT-LABEL: fsr_i64_demandedbits:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a1, a1, 63
; RV64ZBT-NEXT: fsr a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%bmask = and i64 %b, 191
%1 = call i64 @llvm.riscv.fsr.i64(i64 %a, i64 %bmask, i64 %c)
ret i64 %1
}
define i64 @fsli_i64(i64 %a, i64 %b) nounwind {
; RV64ZBT-LABEL: fsli_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsri a0, a1, a0, 49
; RV64ZBT-NEXT: ret
%1 = call i64 @llvm.riscv.fsl.i64(i64 %a, i64 %b, i64 15)
ret i64 %1
}
define i64 @fsri_i64(i64 %a, i64 %b) nounwind {
; RV64ZBT-LABEL: fsri_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsri a0, a0, a1, 5
; RV64ZBT-NEXT: ret
%1 = call i64 @llvm.riscv.fsr.i64(i64 %a, i64 %b, i64 5)
ret i64 %1
}

View File

@ -1,913 +0,0 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64ZBT
define signext i32 @cmix_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: cmix_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: and a0, a1, a0
; RV64I-NEXT: not a1, a1
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmix_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmix a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%and = and i32 %b, %a
%neg = xor i32 %b, -1
%and1 = and i32 %neg, %c
%or = or i32 %and1, %and
ret i32 %or
}
define signext i32 @cmix_i32_2(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: cmix_i32_2:
; RV64I: # %bb.0:
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmix_i32_2:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmix a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%xor = xor i32 %a, %c
%and = and i32 %xor, %b
%xor1 = xor i32 %and, %c
ret i32 %xor1
}
define i64 @cmix_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmix_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: and a0, a1, a0
; RV64I-NEXT: not a1, a1
; RV64I-NEXT: and a1, a1, a2
; RV64I-NEXT: or a0, a1, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmix_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmix a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%and = and i64 %b, %a
%neg = xor i64 %b, -1
%and1 = and i64 %neg, %c
%or = or i64 %and1, %and
ret i64 %or
}
define i64 @cmix_i64_2(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmix_i64_2:
; RV64I: # %bb.0:
; RV64I-NEXT: xor a0, a1, a2
; RV64I-NEXT: and a0, a0, a1
; RV64I-NEXT: xor a0, a0, a2
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmix_i64_2:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmix a0, a1, a1, a2
; RV64ZBT-NEXT: ret
%xor = xor i64 %b, %c
%and = and i64 %xor, %b
%xor1 = xor i64 %and, %c
ret i64 %xor1
}
define signext i32 @cmov_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: cmov_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a1, .LBB4_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool.not = icmp eq i32 %b, 0
%cond = select i1 %tobool.not, i32 %c, i32 %a
ret i32 %cond
}
define signext i32 @cmov_sle_i32(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) nounwind {
; RV64I-LABEL: cmov_sle_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: bge a2, a1, .LBB5_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sle_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slt a1, a2, a1
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sle i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define signext i32 @cmov_sge_i32(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) nounwind {
; RV64I-LABEL: cmov_sge_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: bge a1, a2, .LBB6_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB6_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sge_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slt a1, a1, a2
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sge i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define signext i32 @cmov_ule_i32(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) nounwind {
; RV64I-LABEL: cmov_ule_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: bgeu a2, a1, .LBB7_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB7_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ule_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltu a1, a2, a1
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp ule i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define signext i32 @cmov_uge_i32(i32 signext %a, i32 signext %b, i32 signext %c, i32 signext %d) nounwind {
; RV64I-LABEL: cmov_uge_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: bgeu a1, a2, .LBB8_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB8_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_uge_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltu a1, a1, a2
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp uge i32 %b, %c
%cond = select i1 %tobool, i32 %a, i32 %d
ret i32 %cond
}
define i64 @cmov_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: beqz a1, .LBB9_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: .LBB9_2:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool.not = icmp eq i64 %b, 0
%cond = select i1 %tobool.not, i64 %c, i64 %a
ret i64 %cond
}
define i64 @cmov_eq_i64_constant_2048(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_eq_i64_constant_2048:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1
; RV64I-NEXT: addiw a3, a3, -2048
; RV64I-NEXT: beq a1, a3, .LBB10_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_eq_i64_constant_2048:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: addi a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool.not = icmp eq i64 %b, 2048
%cond = select i1 %tobool.not, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_eq_i64_constant_neg_2047(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_eq_i64_constant_neg_2047:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, -2047
; RV64I-NEXT: beq a1, a3, .LBB11_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB11_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_eq_i64_constant_neg_2047:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: addi a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool.not = icmp eq i64 %b, -2047
%cond = select i1 %tobool.not, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ne_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_ne_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bne a1, a2, .LBB12_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB12_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ne_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: xor a1, a1, a2
; RV64ZBT-NEXT: cmov a0, a1, a0, a3
; RV64ZBT-NEXT: ret
%tobool.not = icmp ne i64 %b, %c
%cond = select i1 %tobool.not, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_ne_i64_constant_zero(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ne_i64_constant_zero:
; RV64I: # %bb.0:
; RV64I-NEXT: bnez a1, .LBB13_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB13_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ne_i64_constant_zero:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool.not = icmp ne i64 %b, 0
%cond = select i1 %tobool.not, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ne_i64_constant_2048(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ne_i64_constant_2048:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1
; RV64I-NEXT: addiw a3, a3, -2048
; RV64I-NEXT: bne a1, a3, .LBB14_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB14_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ne_i64_constant_2048:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: addi a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool.not = icmp ne i64 %b, 2048
%cond = select i1 %tobool.not, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ne_i64_constant_neg_2047(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ne_i64_constant_neg_2047:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, -2047
; RV64I-NEXT: bne a1, a3, .LBB15_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB15_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ne_i64_constant_neg_2047:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: addi a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool.not = icmp ne i64 %b, -2047
%cond = select i1 %tobool.not, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_sle_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_sle_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bge a2, a1, .LBB16_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB16_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sle_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slt a1, a2, a1
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sle i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_sle_i64_constant_2046(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_sle_i64_constant_2046:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, 2047
; RV64I-NEXT: blt a1, a3, .LBB17_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB17_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sle_i64_constant_2046:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slti a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool = icmp sle i64 %b, 2046
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_sle_i64_constant_neg_2049(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_sle_i64_constant_neg_2049:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, -2048
; RV64I-NEXT: blt a1, a3, .LBB18_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB18_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sle_i64_constant_neg_2049:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slti a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool = icmp sle i64 %b, -2049
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_sgt_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_sgt_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: blt a2, a1, .LBB19_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB19_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sgt_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slt a1, a2, a1
; RV64ZBT-NEXT: cmov a0, a1, a0, a3
; RV64ZBT-NEXT: ret
%tobool = icmp sgt i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_sgt_i64_constant_2046(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_sgt_i64_constant_2046:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, 2046
; RV64I-NEXT: blt a3, a1, .LBB20_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB20_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sgt_i64_constant_2046:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slti a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sgt i64 %b, 2046
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_sgt_i64_constant_neg_2049(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_sgt_i64_constant_neg_2049:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048575
; RV64I-NEXT: addiw a3, a3, 2047
; RV64I-NEXT: blt a3, a1, .LBB21_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB21_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sgt_i64_constant_neg_2049:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slti a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sgt i64 %b, -2049
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_sge_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_sge_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bge a1, a2, .LBB22_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB22_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sge_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slt a1, a1, a2
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sge i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_sge_i64_constant_2047(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_sge_i64_constant_2047:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, 2046
; RV64I-NEXT: blt a3, a1, .LBB23_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB23_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sge_i64_constant_2047:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slti a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sge i64 %b, 2047
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_sge_i64_constant_neg_2048(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_sge_i64_constant_neg_2048:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048575
; RV64I-NEXT: addiw a3, a3, 2047
; RV64I-NEXT: blt a3, a1, .LBB24_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB24_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_sge_i64_constant_neg_2048:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: slti a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp sge i64 %b, -2048
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ule_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_ule_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bgeu a2, a1, .LBB25_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB25_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ule_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltu a1, a2, a1
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp ule i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_ule_i64_constant_2047(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ule_i64_constant_2047:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 11
; RV64I-NEXT: beqz a1, .LBB26_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB26_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ule_i64_constant_2047:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: srli a1, a1, 11
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp ule i64 %b, 2047
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ule_i64_constant_neg_2049(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ule_i64_constant_neg_2049:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, -2048
; RV64I-NEXT: bltu a1, a3, .LBB27_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB27_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ule_i64_constant_neg_2049:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltiu a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%tobool = icmp ule i64 %b, 18446744073709549567
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ugt_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_ugt_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bltu a2, a1, .LBB28_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB28_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ugt_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltu a1, a2, a1
; RV64ZBT-NEXT: cmov a0, a1, a0, a3
; RV64ZBT-NEXT: ret
%tobool = icmp ugt i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_ugt_i64_constant_2046(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ugt_i64_constant_2046:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, 2046
; RV64I-NEXT: bltu a3, a1, .LBB29_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB29_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ugt_i64_constant_2046:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltiu a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp ugt i64 %b, 2046
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_ugt_i64_constant_neg_2049(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_ugt_i64_constant_neg_2049:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048575
; RV64I-NEXT: addiw a3, a3, 2047
; RV64I-NEXT: bltu a3, a1, .LBB30_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB30_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_ugt_i64_constant_neg_2049:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltiu a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp ugt i64 %b, 18446744073709549567
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_uge_i64(i64 %a, i64 %b, i64 %c, i64 %d) nounwind {
; RV64I-LABEL: cmov_uge_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: bgeu a1, a2, .LBB31_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB31_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_uge_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltu a1, a1, a2
; RV64ZBT-NEXT: cmov a0, a1, a3, a0
; RV64ZBT-NEXT: ret
%tobool = icmp uge i64 %b, %c
%cond = select i1 %tobool, i64 %a, i64 %d
ret i64 %cond
}
define i64 @cmov_uge_i64_constant_2047(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_uge_i64_constant_2047:
; RV64I: # %bb.0:
; RV64I-NEXT: li a3, 2046
; RV64I-NEXT: bltu a3, a1, .LBB32_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB32_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_uge_i64_constant_2047:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltiu a1, a1, 2047
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp uge i64 %b, 2047
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
define i64 @cmov_uge_i64_constant_neg_2048(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: cmov_uge_i64_constant_neg_2048:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048575
; RV64I-NEXT: addiw a3, a3, 2047
; RV64I-NEXT: bltu a3, a1, .LBB33_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB33_2:
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: cmov_uge_i64_constant_neg_2048:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: sltiu a1, a1, -2048
; RV64ZBT-NEXT: cmov a0, a1, a2, a0
; RV64ZBT-NEXT: ret
%tobool = icmp uge i64 %b, 18446744073709549568
%cond = select i1 %tobool, i64 %a, i64 %c
ret i64 %cond
}
declare i32 @llvm.fshl.i32(i32, i32, i32)
define signext i32 @fshl_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: fshl_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: andi a1, a2, 31
; RV64I-NEXT: sll a0, a0, a1
; RV64I-NEXT: srai a0, a0, 32
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshl_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a2, a2, 31
; RV64ZBT-NEXT: fslw a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
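For reference, `@llvm.fshl.i32(a, b, c)` concatenates a (high half) and b (low half) into an i64, shifts left by c mod 32, and keeps the top 32 bits, i.e. (a << (c mod 32)) | (b >> (32 - c mod 32)) for a nonzero shift amount; `@llvm.fshr` keeps the bottom 32 bits of the matching right shift. A worked example, with values chosen purely for illustration: fshl(0x12345678, 0x9ABCDEF0, 8) = 0x3456789A and fshr(0x12345678, 0x9ABCDEF0, 8) = 0x789ABCDE. The Zbt fsl/fslw/fsr/fsrw instructions compute exactly this funnel shift, which is why the checks above reduce to a single instruction after masking the shift amount.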
; Similar to fshl_i32 but result is not sign extended.
define void @fshl_i32_nosext(i32 signext %a, i32 signext %b, i32 signext %c, i32* %x) nounwind {
; RV64I-LABEL: fshl_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: andi a1, a2, 31
; RV64I-NEXT: sll a0, a0, a1
; RV64I-NEXT: srli a0, a0, 32
; RV64I-NEXT: sw a0, 0(a3)
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshl_i32_nosext:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a2, a2, 31
; RV64ZBT-NEXT: fslw a0, a0, a1, a2
; RV64ZBT-NEXT: sw a0, 0(a3)
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
store i32 %1, i32* %x
ret void
}
declare i64 @llvm.fshl.i64(i64, i64, i64)
define i64 @fshl_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: fshl_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: sll a0, a0, a2
; RV64I-NEXT: not a2, a2
; RV64I-NEXT: srli a1, a1, 1
; RV64I-NEXT: srl a1, a1, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshl_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a2, a2, 63
; RV64ZBT-NEXT: fsl a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 %c)
ret i64 %1
}
declare i32 @llvm.fshr.i32(i32, i32, i32)
define signext i32 @fshr_i32(i32 signext %a, i32 signext %b, i32 signext %c) nounwind {
; RV64I-LABEL: fshr_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: andi a1, a2, 31
; RV64I-NEXT: srl a0, a0, a1
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshr_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a2, a2, 31
; RV64ZBT-NEXT: fsrw a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
ret i32 %1
}
; Similar to fshr_i32 but result is not sign extended.
define void @fshr_i32_nosext(i32 signext %a, i32 signext %b, i32 signext %c, i32* %x) nounwind {
; RV64I-LABEL: fshr_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: slli a0, a0, 32
; RV64I-NEXT: slli a1, a1, 32
; RV64I-NEXT: srli a1, a1, 32
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: andi a1, a2, 31
; RV64I-NEXT: srl a0, a0, a1
; RV64I-NEXT: sw a0, 0(a3)
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshr_i32_nosext:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a2, a2, 31
; RV64ZBT-NEXT: fsrw a0, a1, a0, a2
; RV64ZBT-NEXT: sw a0, 0(a3)
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 %c)
store i32 %1, i32* %x
ret void
}
declare i64 @llvm.fshr.i64(i64, i64, i64)
define i64 @fshr_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64I-LABEL: fshr_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: srl a1, a1, a2
; RV64I-NEXT: not a2, a2
; RV64I-NEXT: slli a0, a0, 1
; RV64I-NEXT: sll a0, a0, a2
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshr_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: andi a2, a2, 63
; RV64ZBT-NEXT: fsr a0, a1, a0, a2
; RV64ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 %c)
ret i64 %1
}
define signext i32 @fshri_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: fshri_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 5
; RV64I-NEXT: slliw a0, a0, 27
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshri_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsriw a0, a1, a0, 5
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
ret i32 %1
}
; Similar to fshri_i32 but result is not sign extended.
define void @fshri_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
; RV64I-LABEL: fshri_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 5
; RV64I-NEXT: slli a0, a0, 27
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: sw a0, 0(a2)
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshri_i32_nosext:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsriw a0, a1, a0, 5
; RV64ZBT-NEXT: sw a0, 0(a2)
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshr.i32(i32 %a, i32 %b, i32 5)
store i32 %1, i32* %x
ret void
}
define i64 @fshri_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: fshri_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 5
; RV64I-NEXT: slli a0, a0, 59
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshri_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsri a0, a1, a0, 5
; RV64ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshr.i64(i64 %a, i64 %b, i64 5)
ret i64 %1
}
define signext i32 @fshli_i32(i32 signext %a, i32 signext %b) nounwind {
; RV64I-LABEL: fshli_i32:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 27
; RV64I-NEXT: slliw a0, a0, 5
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshli_i32:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsriw a0, a1, a0, 27
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
ret i32 %1
}
; Similar to fshli_i32 but result is not sign extended.
define void @fshli_i32_nosext(i32 signext %a, i32 signext %b, i32* %x) nounwind {
; RV64I-LABEL: fshli_i32_nosext:
; RV64I: # %bb.0:
; RV64I-NEXT: srliw a1, a1, 27
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: sw a0, 0(a2)
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshli_i32_nosext:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsriw a0, a1, a0, 27
; RV64ZBT-NEXT: sw a0, 0(a2)
; RV64ZBT-NEXT: ret
%1 = tail call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 5)
store i32 %1, i32* %x
ret void
}
define i64 @fshli_i64(i64 %a, i64 %b) nounwind {
; RV64I-LABEL: fshli_i64:
; RV64I: # %bb.0:
; RV64I-NEXT: srli a1, a1, 59
; RV64I-NEXT: slli a0, a0, 5
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: fshli_i64:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: fsri a0, a1, a0, 59
; RV64ZBT-NEXT: ret
%1 = tail call i64 @llvm.fshl.i64(i64 %a, i64 %b, i64 5)
ret i64 %1
}


@ -1,10 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32IZbb,RV32IZbbNOZbt
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64IZbb,RV64IZbbNOZbt
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV32IZbb,RV32IZbbZbt
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV64IZbb,RV64IZbbZbt
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32,RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64,RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb
declare i4 @llvm.sadd.sat.i4(i4, i4)
declare i8 @llvm.sadd.sat.i8(i8, i8)
@ -13,19 +11,19 @@ declare i32 @llvm.sadd.sat.i32(i32, i32)
declare i64 @llvm.sadd.sat.i64(i64, i64)
define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV32I-LABEL: func:
; RV32I: # %bb.0:
; RV32I-NEXT: mv a2, a0
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: slt a2, a0, a2
; RV32I-NEXT: slti a1, a1, 0
; RV32I-NEXT: beq a1, a2, .LBB0_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srai a0, a0, 31
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: ret
; RV32-LABEL: func:
; RV32: # %bb.0:
; RV32-NEXT: mv a2, a0
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: slt a2, a0, a2
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: beq a1, a2, .LBB0_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: srai a0, a0, 31
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: xor a0, a0, a1
; RV32-NEXT: .LBB0_2:
; RV32-NEXT: ret
;
; RV64I-LABEL: func:
; RV64I: # %bb.0:
@ -44,20 +42,6 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: ret
;
; RV32IZbbNOZbt-LABEL: func:
; RV32IZbbNOZbt: # %bb.0:
; RV32IZbbNOZbt-NEXT: mv a2, a0
; RV32IZbbNOZbt-NEXT: add a0, a0, a1
; RV32IZbbNOZbt-NEXT: slt a2, a0, a2
; RV32IZbbNOZbt-NEXT: slti a1, a1, 0
; RV32IZbbNOZbt-NEXT: beq a1, a2, .LBB0_2
; RV32IZbbNOZbt-NEXT: # %bb.1:
; RV32IZbbNOZbt-NEXT: srai a0, a0, 31
; RV32IZbbNOZbt-NEXT: lui a1, 524288
; RV32IZbbNOZbt-NEXT: xor a0, a0, a1
; RV32IZbbNOZbt-NEXT: .LBB0_2:
; RV32IZbbNOZbt-NEXT: ret
;
; RV64IZbb-LABEL: func:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: add a0, a0, a1
@ -66,18 +50,6 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
;
; RV32IZbbZbt-LABEL: func:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: add a2, a0, a1
; RV32IZbbZbt-NEXT: slt a0, a2, a0
; RV32IZbbZbt-NEXT: slti a1, a1, 0
; RV32IZbbZbt-NEXT: xor a0, a1, a0
; RV32IZbbZbt-NEXT: srai a1, a2, 31
; RV32IZbbZbt-NEXT: lui a3, 524288
; RV32IZbbZbt-NEXT: xor a1, a1, a3
; RV32IZbbZbt-NEXT: cmov a0, a0, a1, a2
; RV32IZbbZbt-NEXT: ret
%tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %y);
ret i32 %tmp;
}
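The branchy sequences above use the standard signed-overflow test for x + y: overflow happened iff (sum < x) disagrees with (y < 0), and on overflow the saturated value is rebuilt from the sum's sign (`srai` to get all sign bits, then `xor` with 0x80000000, the `lui a1, 524288` constant). The same check as a standalone IR sketch (hypothetical function name; the test itself only calls the intrinsic):
define i1 @sketch_sadd_overflow(i32 %x, i32 %y) nounwind {
  %sum = add i32 %x, %y            ; plain wrapping add
  %cmp1 = icmp slt i32 %sum, %x    ; did the sum fall below x?
  %cmp2 = icmp slt i32 %y, 0       ; sign of the addend
  %ovf = xor i1 %cmp1, %cmp2       ; overflow iff the two disagree
  ret i1 %ovf
}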
@ -103,84 +75,39 @@ define i64 @func2(i64 %x, i64 %y) nounwind {
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: func2:
; RV64I: # %bb.0:
; RV64I-NEXT: mv a2, a0
; RV64I-NEXT: add a0, a0, a1
; RV64I-NEXT: slt a2, a0, a2
; RV64I-NEXT: slti a1, a1, 0
; RV64I-NEXT: beq a1, a2, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: srai a0, a0, 63
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
; RV64-LABEL: func2:
; RV64: # %bb.0:
; RV64-NEXT: mv a2, a0
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: slt a2, a0, a2
; RV64-NEXT: slti a1, a1, 0
; RV64-NEXT: beq a1, a2, .LBB1_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
; RV64-NEXT: xor a0, a0, a1
; RV64-NEXT: .LBB1_2:
; RV64-NEXT: ret
;
; RV32IZbbNOZbt-LABEL: func2:
; RV32IZbbNOZbt: # %bb.0:
; RV32IZbbNOZbt-NEXT: mv a4, a1
; RV32IZbbNOZbt-NEXT: mv a1, a0
; RV32IZbbNOZbt-NEXT: add a5, a4, a3
; RV32IZbbNOZbt-NEXT: add a0, a0, a2
; RV32IZbbNOZbt-NEXT: sltu a1, a0, a1
; RV32IZbbNOZbt-NEXT: add a1, a5, a1
; RV32IZbbNOZbt-NEXT: xor a2, a4, a1
; RV32IZbbNOZbt-NEXT: xor a3, a4, a3
; RV32IZbbNOZbt-NEXT: andn a2, a2, a3
; RV32IZbbNOZbt-NEXT: bgez a2, .LBB1_2
; RV32IZbbNOZbt-NEXT: # %bb.1:
; RV32IZbbNOZbt-NEXT: srai a0, a1, 31
; RV32IZbbNOZbt-NEXT: lui a1, 524288
; RV32IZbbNOZbt-NEXT: xor a1, a0, a1
; RV32IZbbNOZbt-NEXT: .LBB1_2:
; RV32IZbbNOZbt-NEXT: ret
;
; RV64IZbbNOZbt-LABEL: func2:
; RV64IZbbNOZbt: # %bb.0:
; RV64IZbbNOZbt-NEXT: mv a2, a0
; RV64IZbbNOZbt-NEXT: add a0, a0, a1
; RV64IZbbNOZbt-NEXT: slt a2, a0, a2
; RV64IZbbNOZbt-NEXT: slti a1, a1, 0
; RV64IZbbNOZbt-NEXT: beq a1, a2, .LBB1_2
; RV64IZbbNOZbt-NEXT: # %bb.1:
; RV64IZbbNOZbt-NEXT: srai a0, a0, 63
; RV64IZbbNOZbt-NEXT: li a1, -1
; RV64IZbbNOZbt-NEXT: slli a1, a1, 63
; RV64IZbbNOZbt-NEXT: xor a0, a0, a1
; RV64IZbbNOZbt-NEXT: .LBB1_2:
; RV64IZbbNOZbt-NEXT: ret
;
; RV32IZbbZbt-LABEL: func2:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: add a4, a1, a3
; RV32IZbbZbt-NEXT: add a2, a0, a2
; RV32IZbbZbt-NEXT: sltu a0, a2, a0
; RV32IZbbZbt-NEXT: add a0, a4, a0
; RV32IZbbZbt-NEXT: srai a4, a0, 31
; RV32IZbbZbt-NEXT: lui a5, 524288
; RV32IZbbZbt-NEXT: xor a5, a4, a5
; RV32IZbbZbt-NEXT: xor a6, a1, a0
; RV32IZbbZbt-NEXT: xor a1, a1, a3
; RV32IZbbZbt-NEXT: andn a1, a6, a1
; RV32IZbbZbt-NEXT: slti a3, a1, 0
; RV32IZbbZbt-NEXT: cmov a1, a3, a5, a0
; RV32IZbbZbt-NEXT: cmov a0, a3, a4, a2
; RV32IZbbZbt-NEXT: ret
;
; RV64IZbbZbt-LABEL: func2:
; RV64IZbbZbt: # %bb.0:
; RV64IZbbZbt-NEXT: add a2, a0, a1
; RV64IZbbZbt-NEXT: slt a0, a2, a0
; RV64IZbbZbt-NEXT: slti a1, a1, 0
; RV64IZbbZbt-NEXT: xor a0, a1, a0
; RV64IZbbZbt-NEXT: srai a1, a2, 63
; RV64IZbbZbt-NEXT: li a3, -1
; RV64IZbbZbt-NEXT: slli a3, a3, 63
; RV64IZbbZbt-NEXT: xor a1, a1, a3
; RV64IZbbZbt-NEXT: cmov a0, a0, a1, a2
; RV64IZbbZbt-NEXT: ret
; RV32IZbb-LABEL: func2:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mv a4, a1
; RV32IZbb-NEXT: mv a1, a0
; RV32IZbb-NEXT: add a5, a4, a3
; RV32IZbb-NEXT: add a0, a0, a2
; RV32IZbb-NEXT: sltu a1, a0, a1
; RV32IZbb-NEXT: add a1, a5, a1
; RV32IZbb-NEXT: xor a2, a4, a1
; RV32IZbb-NEXT: xor a3, a4, a3
; RV32IZbb-NEXT: andn a2, a2, a3
; RV32IZbb-NEXT: bgez a2, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: srai a0, a1, 31
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: xor a1, a0, a1
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: ret
%tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %y);
ret i64 %tmp;
}


@ -1,10 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefix=RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefix=RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32IZbb,RV32IZbbNOZbt
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64IZbb,RV64IZbbNOZbt
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV32IZbb,RV32IZbbZbt
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV64IZbb,RV64IZbbZbt
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32,RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64,RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb
declare i4 @llvm.sadd.sat.i4(i4, i4)
declare i8 @llvm.sadd.sat.i8(i8, i8)
@ -13,20 +11,20 @@ declare i32 @llvm.sadd.sat.i32(i32, i32)
declare i64 @llvm.sadd.sat.i64(i64, i64)
define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV32I-LABEL: func32:
; RV32I: # %bb.0:
; RV32I-NEXT: mv a3, a0
; RV32I-NEXT: mul a1, a1, a2
; RV32I-NEXT: add a0, a0, a1
; RV32I-NEXT: slt a2, a0, a3
; RV32I-NEXT: slti a1, a1, 0
; RV32I-NEXT: beq a1, a2, .LBB0_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: srai a0, a0, 31
; RV32I-NEXT: lui a1, 524288
; RV32I-NEXT: xor a0, a0, a1
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: ret
; RV32-LABEL: func32:
; RV32: # %bb.0:
; RV32-NEXT: mv a3, a0
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: slt a2, a0, a3
; RV32-NEXT: slti a1, a1, 0
; RV32-NEXT: beq a1, a2, .LBB0_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: srai a0, a0, 31
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: xor a0, a0, a1
; RV32-NEXT: .LBB0_2:
; RV32-NEXT: ret
;
; RV64I-LABEL: func32:
; RV64I: # %bb.0:
@ -47,21 +45,6 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64I-NEXT: lui a0, 524288
; RV64I-NEXT: ret
;
; RV32IZbbNOZbt-LABEL: func32:
; RV32IZbbNOZbt: # %bb.0:
; RV32IZbbNOZbt-NEXT: mv a3, a0
; RV32IZbbNOZbt-NEXT: mul a1, a1, a2
; RV32IZbbNOZbt-NEXT: add a0, a0, a1
; RV32IZbbNOZbt-NEXT: slt a2, a0, a3
; RV32IZbbNOZbt-NEXT: slti a1, a1, 0
; RV32IZbbNOZbt-NEXT: beq a1, a2, .LBB0_2
; RV32IZbbNOZbt-NEXT: # %bb.1:
; RV32IZbbNOZbt-NEXT: srai a0, a0, 31
; RV32IZbbNOZbt-NEXT: lui a1, 524288
; RV32IZbbNOZbt-NEXT: xor a0, a0, a1
; RV32IZbbNOZbt-NEXT: .LBB0_2:
; RV32IZbbNOZbt-NEXT: ret
;
; RV64IZbb-LABEL: func32:
; RV64IZbb: # %bb.0:
; RV64IZbb-NEXT: sext.w a0, a0
@ -72,19 +55,6 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
;
; RV32IZbbZbt-LABEL: func32:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: mul a1, a1, a2
; RV32IZbbZbt-NEXT: add a2, a0, a1
; RV32IZbbZbt-NEXT: slt a0, a2, a0
; RV32IZbbZbt-NEXT: slti a1, a1, 0
; RV32IZbbZbt-NEXT: xor a0, a1, a0
; RV32IZbbZbt-NEXT: srai a1, a2, 31
; RV32IZbbZbt-NEXT: lui a3, 524288
; RV32IZbbZbt-NEXT: xor a1, a1, a3
; RV32IZbbZbt-NEXT: cmov a0, a0, a1, a2
; RV32IZbbZbt-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.sadd.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
@ -111,84 +81,39 @@ define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: ret
;
; RV64I-LABEL: func64:
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: add a0, a0, a2
; RV64I-NEXT: slt a1, a0, a1
; RV64I-NEXT: slti a2, a2, 0
; RV64I-NEXT: beq a2, a1, .LBB1_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: srai a0, a0, 63
; RV64I-NEXT: li a1, -1
; RV64I-NEXT: slli a1, a1, 63
; RV64I-NEXT: xor a0, a0, a1
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: ret
; RV64-LABEL: func64:
; RV64: # %bb.0:
; RV64-NEXT: mv a1, a0
; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: slt a1, a0, a1
; RV64-NEXT: slti a2, a2, 0
; RV64-NEXT: beq a2, a1, .LBB1_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
; RV64-NEXT: xor a0, a0, a1
; RV64-NEXT: .LBB1_2:
; RV64-NEXT: ret
;
; RV32IZbbNOZbt-LABEL: func64:
; RV32IZbbNOZbt: # %bb.0:
; RV32IZbbNOZbt-NEXT: mv a2, a1
; RV32IZbbNOZbt-NEXT: mv a1, a0
; RV32IZbbNOZbt-NEXT: add a3, a2, a5
; RV32IZbbNOZbt-NEXT: add a0, a0, a4
; RV32IZbbNOZbt-NEXT: sltu a1, a0, a1
; RV32IZbbNOZbt-NEXT: add a1, a3, a1
; RV32IZbbNOZbt-NEXT: xor a3, a2, a1
; RV32IZbbNOZbt-NEXT: xor a2, a2, a5
; RV32IZbbNOZbt-NEXT: andn a2, a3, a2
; RV32IZbbNOZbt-NEXT: bgez a2, .LBB1_2
; RV32IZbbNOZbt-NEXT: # %bb.1:
; RV32IZbbNOZbt-NEXT: srai a0, a1, 31
; RV32IZbbNOZbt-NEXT: lui a1, 524288
; RV32IZbbNOZbt-NEXT: xor a1, a0, a1
; RV32IZbbNOZbt-NEXT: .LBB1_2:
; RV32IZbbNOZbt-NEXT: ret
;
; RV64IZbbNOZbt-LABEL: func64:
; RV64IZbbNOZbt: # %bb.0:
; RV64IZbbNOZbt-NEXT: mv a1, a0
; RV64IZbbNOZbt-NEXT: add a0, a0, a2
; RV64IZbbNOZbt-NEXT: slt a1, a0, a1
; RV64IZbbNOZbt-NEXT: slti a2, a2, 0
; RV64IZbbNOZbt-NEXT: beq a2, a1, .LBB1_2
; RV64IZbbNOZbt-NEXT: # %bb.1:
; RV64IZbbNOZbt-NEXT: srai a0, a0, 63
; RV64IZbbNOZbt-NEXT: li a1, -1
; RV64IZbbNOZbt-NEXT: slli a1, a1, 63
; RV64IZbbNOZbt-NEXT: xor a0, a0, a1
; RV64IZbbNOZbt-NEXT: .LBB1_2:
; RV64IZbbNOZbt-NEXT: ret
;
; RV32IZbbZbt-LABEL: func64:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: add a2, a1, a5
; RV32IZbbZbt-NEXT: add a3, a0, a4
; RV32IZbbZbt-NEXT: sltu a0, a3, a0
; RV32IZbbZbt-NEXT: add a0, a2, a0
; RV32IZbbZbt-NEXT: srai a2, a0, 31
; RV32IZbbZbt-NEXT: lui a4, 524288
; RV32IZbbZbt-NEXT: xor a4, a2, a4
; RV32IZbbZbt-NEXT: xor a6, a1, a0
; RV32IZbbZbt-NEXT: xor a1, a1, a5
; RV32IZbbZbt-NEXT: andn a1, a6, a1
; RV32IZbbZbt-NEXT: slti a5, a1, 0
; RV32IZbbZbt-NEXT: cmov a1, a5, a4, a0
; RV32IZbbZbt-NEXT: cmov a0, a5, a2, a3
; RV32IZbbZbt-NEXT: ret
;
; RV64IZbbZbt-LABEL: func64:
; RV64IZbbZbt: # %bb.0:
; RV64IZbbZbt-NEXT: add a1, a0, a2
; RV64IZbbZbt-NEXT: slt a0, a1, a0
; RV64IZbbZbt-NEXT: slti a2, a2, 0
; RV64IZbbZbt-NEXT: xor a0, a2, a0
; RV64IZbbZbt-NEXT: srai a2, a1, 63
; RV64IZbbZbt-NEXT: li a3, -1
; RV64IZbbZbt-NEXT: slli a3, a3, 63
; RV64IZbbZbt-NEXT: xor a2, a2, a3
; RV64IZbbZbt-NEXT: cmov a0, a0, a2, a1
; RV64IZbbZbt-NEXT: ret
; RV32IZbb-LABEL: func64:
; RV32IZbb: # %bb.0:
; RV32IZbb-NEXT: mv a2, a1
; RV32IZbb-NEXT: mv a1, a0
; RV32IZbb-NEXT: add a3, a2, a5
; RV32IZbb-NEXT: add a0, a0, a4
; RV32IZbb-NEXT: sltu a1, a0, a1
; RV32IZbb-NEXT: add a1, a3, a1
; RV32IZbb-NEXT: xor a3, a2, a1
; RV32IZbb-NEXT: xor a2, a2, a5
; RV32IZbb-NEXT: andn a2, a3, a2
; RV32IZbb-NEXT: bgez a2, .LBB1_2
; RV32IZbb-NEXT: # %bb.1:
; RV32IZbb-NEXT: srai a0, a1, 31
; RV32IZbb-NEXT: lui a1, 524288
; RV32IZbb-NEXT: xor a1, a0, a1
; RV32IZbb-NEXT: .LBB1_2:
; RV32IZbb-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.sadd.sat.i64(i64 %x, i64 %z)
ret i64 %tmp


@ -1,12 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IBT %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IBT %s
;; There are a few different ways to lower (select (and A, B), X, Y). This test
;; ensures that we do so with as few branches as possible.
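The input pattern, sketched as a standalone function (hypothetical name, not taken from the test file). The point is that the i1 `and` can feed one conditional move or one branch rather than two branches:
define i32 @sketch_select_of_and(i1 zeroext %a, i1 zeroext %b, i32 %x, i32 %y) nounwind {
  %cond = and i1 %a, %b                   ; combine the conditions first
  %res = select i1 %cond, i32 %x, i32 %y  ; then select once
  ret i32 %res
}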
@ -22,12 +18,6 @@ define signext i32 @select_of_and(i1 zeroext %a, i1 zeroext %b, i32 signext %c,
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: select_of_and:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: and a0, a0, a1
; RV32IBT-NEXT: cmov a0, a0, a2, a3
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: select_of_and:
; RV64I: # %bb.0:
; RV64I-NEXT: and a1, a0, a1
@ -37,12 +27,6 @@ define signext i32 @select_of_and(i1 zeroext %a, i1 zeroext %b, i32 signext %c,
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: select_of_and:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: and a0, a0, a1
; RV64IBT-NEXT: cmov a0, a0, a2, a3
; RV64IBT-NEXT: ret
%1 = and i1 %a, %b
%2 = select i1 %1, i32 %c, i32 %d
ret i32 %2
@ -68,22 +52,6 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: if_of_and:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: addi sp, sp, -16
; RV32IBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IBT-NEXT: and a0, a0, a1
; RV32IBT-NEXT: beqz a0, .LBB1_2
; RV32IBT-NEXT: # %bb.1: # %if.then
; RV32IBT-NEXT: call both@plt
; RV32IBT-NEXT: j .LBB1_3
; RV32IBT-NEXT: .LBB1_2: # %if.else
; RV32IBT-NEXT: call neither@plt
; RV32IBT-NEXT: .LBB1_3: # %if.end
; RV32IBT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IBT-NEXT: addi sp, sp, 16
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: if_of_and:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@ -99,22 +67,6 @@ define signext i32 @if_of_and(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: if_of_and:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: addi sp, sp, -16
; RV64IBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IBT-NEXT: and a0, a0, a1
; RV64IBT-NEXT: beqz a0, .LBB1_2
; RV64IBT-NEXT: # %bb.1: # %if.then
; RV64IBT-NEXT: call both@plt
; RV64IBT-NEXT: j .LBB1_3
; RV64IBT-NEXT: .LBB1_2: # %if.else
; RV64IBT-NEXT: call neither@plt
; RV64IBT-NEXT: .LBB1_3: # %if.end
; RV64IBT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IBT-NEXT: addi sp, sp, 16
; RV64IBT-NEXT: ret
%1 = and i1 %a, %b
br i1 %1, label %if.then, label %if.else


@ -1,8 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32IBT
define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind {
; RV32I-LABEL: bare_select:
@ -14,12 +12,6 @@ define i32 @bare_select(i1 %a, i32 %b, i32 %c) nounwind {
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: bare_select:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: andi a0, a0, 1
; RV32IBT-NEXT: cmov a0, a0, a1, a2
; RV32IBT-NEXT: ret
%1 = select i1 %a, i32 %b, i32 %c
ret i32 %1
}
@ -34,12 +26,6 @@ define float @bare_select_float(i1 %a, float %b, float %c) nounwind {
; RV32I-NEXT: mv a0, a2
; RV32I-NEXT: .LBB1_2:
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: bare_select_float:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: andi a0, a0, 1
; RV32IBT-NEXT: cmov a0, a0, a1, a2
; RV32IBT-NEXT: ret
%1 = select i1 %a, float %b, float %c
ret float %1
}


@ -1,12 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32ZBT %s
; RUN: llc -mtriple=riscv64 -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -disable-block-placement -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64ZBT %s
define signext i32 @foo(i32 signext %a, i32 *%b) nounwind {
; RV32I-LABEL: foo:
@ -85,52 +81,6 @@ define signext i32 @foo(i32 signext %a, i32 *%b) nounwind {
; RV32I-NEXT: .LBB0_28:
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: foo:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: xor a4, a0, a2
; RV32ZBT-NEXT: cmov a0, a4, a2, a0
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: xor a4, a0, a3
; RV32ZBT-NEXT: cmov a0, a4, a0, a3
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: sltu a4, a2, a0
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: sltu a4, a0, a3
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: sltu a4, a0, a2
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: sltu a4, a3, a0
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: slt a4, a2, a0
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: slt a4, a0, a3
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: slt a4, a0, a2
; RV32ZBT-NEXT: lw a5, 0(a1)
; RV32ZBT-NEXT: cmov a0, a4, a0, a2
; RV32ZBT-NEXT: slt a2, a3, a0
; RV32ZBT-NEXT: cmov a0, a2, a3, a0
; RV32ZBT-NEXT: slti a2, a5, 1
; RV32ZBT-NEXT: lw a3, 0(a1)
; RV32ZBT-NEXT: cmov a0, a2, a0, a5
; RV32ZBT-NEXT: lw a2, 0(a1)
; RV32ZBT-NEXT: slti a4, a5, 0
; RV32ZBT-NEXT: cmov a0, a4, a3, a0
; RV32ZBT-NEXT: lw a1, 0(a1)
; RV32ZBT-NEXT: slti a3, a2, 1025
; RV32ZBT-NEXT: cmov a0, a3, a2, a0
; RV32ZBT-NEXT: sltiu a2, a5, 2047
; RV32ZBT-NEXT: cmov a0, a2, a1, a0
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: foo:
; RV64I: # %bb.0:
; RV64I-NEXT: lw a2, 0(a1)
@ -211,57 +161,6 @@ define signext i32 @foo(i32 signext %a, i32 *%b) nounwind {
; RV64I-NEXT: .LBB0_28:
; RV64I-NEXT: sext.w a0, a0
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: foo:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: xor a4, a0, a2
; RV64ZBT-NEXT: cmov a0, a4, a2, a0
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: xor a4, a0, a3
; RV64ZBT-NEXT: cmov a0, a4, a0, a3
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: sltu a4, a2, a0
; RV64ZBT-NEXT: cmov a0, a4, a0, a2
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: sltu a4, a0, a3
; RV64ZBT-NEXT: cmov a0, a4, a3, a0
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: sltu a4, a0, a2
; RV64ZBT-NEXT: cmov a0, a4, a0, a2
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: sltu a4, a3, a0
; RV64ZBT-NEXT: cmov a0, a4, a3, a0
; RV64ZBT-NEXT: sext.w a3, a0
; RV64ZBT-NEXT: slt a3, a2, a3
; RV64ZBT-NEXT: lw a4, 0(a1)
; RV64ZBT-NEXT: cmov a0, a3, a0, a2
; RV64ZBT-NEXT: sext.w a2, a0
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: slt a2, a2, a4
; RV64ZBT-NEXT: cmov a0, a2, a4, a0
; RV64ZBT-NEXT: sext.w a2, a0
; RV64ZBT-NEXT: slt a2, a2, a3
; RV64ZBT-NEXT: lw a4, 0(a1)
; RV64ZBT-NEXT: cmov a0, a2, a0, a3
; RV64ZBT-NEXT: lw a2, 0(a1)
; RV64ZBT-NEXT: sext.w a3, a0
; RV64ZBT-NEXT: slt a3, a4, a3
; RV64ZBT-NEXT: cmov a0, a3, a4, a0
; RV64ZBT-NEXT: slti a3, a2, 1
; RV64ZBT-NEXT: lw a4, 0(a1)
; RV64ZBT-NEXT: cmov a0, a3, a0, a2
; RV64ZBT-NEXT: lw a3, 0(a1)
; RV64ZBT-NEXT: slti a5, a2, 0
; RV64ZBT-NEXT: cmov a0, a5, a4, a0
; RV64ZBT-NEXT: lw a1, 0(a1)
; RV64ZBT-NEXT: slti a4, a3, 1025
; RV64ZBT-NEXT: cmov a0, a4, a3, a0
; RV64ZBT-NEXT: sltiu a2, a2, 2047
; RV64ZBT-NEXT: cmov a0, a2, a1, a0
; RV64ZBT-NEXT: sext.w a0, a0
; RV64ZBT-NEXT: ret
%val1 = load volatile i32, i32* %b
%tst1 = icmp eq i32 %a, %val1
%val2 = select i1 %tst1, i32 %a, i32 %val1
@ -344,23 +243,6 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: numsignbits:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: addi sp, sp, -16
; RV32ZBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32ZBT-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32ZBT-NEXT: cmov s0, a0, a2, a3
; RV32ZBT-NEXT: beqz a1, .LBB1_2
; RV32ZBT-NEXT: # %bb.1:
; RV32ZBT-NEXT: mv a0, s0
; RV32ZBT-NEXT: call bar@plt
; RV32ZBT-NEXT: .LBB1_2:
; RV32ZBT-NEXT: mv a0, s0
; RV32ZBT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32ZBT-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32ZBT-NEXT: addi sp, sp, 16
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: numsignbits:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@ -381,23 +263,6 @@ define signext i16 @numsignbits(i16 signext %0, i16 signext %1, i16 signext %2,
; RV64I-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: numsignbits:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: addi sp, sp, -16
; RV64ZBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64ZBT-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64ZBT-NEXT: cmov s0, a0, a2, a3
; RV64ZBT-NEXT: beqz a1, .LBB1_2
; RV64ZBT-NEXT: # %bb.1:
; RV64ZBT-NEXT: mv a0, s0
; RV64ZBT-NEXT: call bar@plt
; RV64ZBT-NEXT: .LBB1_2:
; RV64ZBT-NEXT: mv a0, s0
; RV64ZBT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64ZBT-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64ZBT-NEXT: addi sp, sp, 16
; RV64ZBT-NEXT: ret
%5 = icmp eq i16 %0, 0
%6 = select i1 %5, i16 %3, i16 %2
%7 = icmp eq i16 %1, 0
@ -425,14 +290,6 @@ define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z)
; RV32I-NEXT: mv a0, a1
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: select_sge_int16min:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: lui a3, 1048560
; RV32ZBT-NEXT: addi a3, a3, -1
; RV32ZBT-NEXT: slt a0, a3, a0
; RV32ZBT-NEXT: cmov a0, a0, a1, a2
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_sge_int16min:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 1048560
@ -443,14 +300,6 @@ define i32 @select_sge_int16min(i32 signext %x, i32 signext %y, i32 signext %z)
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: select_sge_int16min:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: lui a3, 1048560
; RV64ZBT-NEXT: addiw a3, a3, -1
; RV64ZBT-NEXT: slt a0, a3, a0
; RV64ZBT-NEXT: cmov a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%a = icmp sge i32 %x, -65536
%b = select i1 %a, i32 %y, i32 %z
ret i32 %b
@ -477,17 +326,6 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
; RV32I-NEXT: mv a1, a3
; RV32I-NEXT: ret
;
; RV32ZBT-LABEL: select_sge_int32min:
; RV32ZBT: # %bb.0:
; RV32ZBT-NEXT: slti a0, a0, 0
; RV32ZBT-NEXT: addi a6, a1, 1
; RV32ZBT-NEXT: slti a1, a1, 0
; RV32ZBT-NEXT: xori a1, a1, 1
; RV32ZBT-NEXT: cmov a1, a6, a1, a0
; RV32ZBT-NEXT: cmov a0, a1, a2, a4
; RV32ZBT-NEXT: cmov a1, a1, a3, a5
; RV32ZBT-NEXT: ret
;
; RV64I-LABEL: select_sge_int32min:
; RV64I: # %bb.0:
; RV64I-NEXT: lui a3, 524288
@ -498,14 +336,6 @@ define i64 @select_sge_int32min(i64 %x, i64 %y, i64 %z) {
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64ZBT-LABEL: select_sge_int32min:
; RV64ZBT: # %bb.0:
; RV64ZBT-NEXT: lui a3, 524288
; RV64ZBT-NEXT: addi a3, a3, -1
; RV64ZBT-NEXT: slt a0, a3, a0
; RV64ZBT-NEXT: cmov a0, a0, a1, a2
; RV64ZBT-NEXT: ret
%a = icmp sge i64 %x, -2147483648
%b = select i1 %a, i64 %y, i64 %z
ret i64 %b


@ -3,18 +3,10 @@
; RUN: | FileCheck -check-prefixes=RV32,RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+f -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32IF %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32IBT %s
; RUN: llc -mtriple=riscv32 -mattr=+f,+experimental-zbt -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV32,RV32IFBT %s
; RUN: llc -mtriple=riscv64 -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,RV64IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,RV64IBT %s
; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+experimental-zbt -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefixes=RV64,RV64IFDBT %s
;; This tests how good we are at materialising constants using `select`. The aim
;; is that we do so without a branch if possible (at the moment our lowering of
@ -67,73 +59,25 @@ define signext i32 @select_const_int_pow2_zero(i1 zeroext %a) nounwind {
}
define signext i32 @select_const_int_harder(i1 zeroext %a) nounwind {
; RV32I-LABEL: select_const_int_harder:
; RV32I: # %bb.0:
; RV32I-NEXT: mv a1, a0
; RV32I-NEXT: li a0, 6
; RV32I-NEXT: bnez a1, .LBB3_2
; RV32I-NEXT: # %bb.1:
; RV32I-NEXT: li a0, 38
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: ret
; RV32-LABEL: select_const_int_harder:
; RV32: # %bb.0:
; RV32-NEXT: mv a1, a0
; RV32-NEXT: li a0, 6
; RV32-NEXT: bnez a1, .LBB3_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a0, 38
; RV32-NEXT: .LBB3_2:
; RV32-NEXT: ret
;
; RV32IF-LABEL: select_const_int_harder:
; RV32IF: # %bb.0:
; RV32IF-NEXT: mv a1, a0
; RV32IF-NEXT: li a0, 6
; RV32IF-NEXT: bnez a1, .LBB3_2
; RV32IF-NEXT: # %bb.1:
; RV32IF-NEXT: li a0, 38
; RV32IF-NEXT: .LBB3_2:
; RV32IF-NEXT: ret
;
; RV32IBT-LABEL: select_const_int_harder:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: li a1, 38
; RV32IBT-NEXT: li a2, 6
; RV32IBT-NEXT: cmov a0, a0, a2, a1
; RV32IBT-NEXT: ret
;
; RV32IFBT-LABEL: select_const_int_harder:
; RV32IFBT: # %bb.0:
; RV32IFBT-NEXT: li a1, 38
; RV32IFBT-NEXT: li a2, 6
; RV32IFBT-NEXT: cmov a0, a0, a2, a1
; RV32IFBT-NEXT: ret
;
; RV64I-LABEL: select_const_int_harder:
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
; RV64I-NEXT: li a0, 6
; RV64I-NEXT: bnez a1, .LBB3_2
; RV64I-NEXT: # %bb.1:
; RV64I-NEXT: li a0, 38
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: ret
;
; RV64IFD-LABEL: select_const_int_harder:
; RV64IFD: # %bb.0:
; RV64IFD-NEXT: mv a1, a0
; RV64IFD-NEXT: li a0, 6
; RV64IFD-NEXT: bnez a1, .LBB3_2
; RV64IFD-NEXT: # %bb.1:
; RV64IFD-NEXT: li a0, 38
; RV64IFD-NEXT: .LBB3_2:
; RV64IFD-NEXT: ret
;
; RV64IBT-LABEL: select_const_int_harder:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: li a1, 38
; RV64IBT-NEXT: li a2, 6
; RV64IBT-NEXT: cmov a0, a0, a2, a1
; RV64IBT-NEXT: ret
;
; RV64IFDBT-LABEL: select_const_int_harder:
; RV64IFDBT: # %bb.0:
; RV64IFDBT-NEXT: li a1, 38
; RV64IFDBT-NEXT: li a2, 6
; RV64IFDBT-NEXT: cmov a0, a0, a2, a1
; RV64IFDBT-NEXT: ret
; RV64-LABEL: select_const_int_harder:
; RV64: # %bb.0:
; RV64-NEXT: mv a1, a0
; RV64-NEXT: li a0, 6
; RV64-NEXT: bnez a1, .LBB3_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a0, 38
; RV64-NEXT: .LBB3_2:
; RV64-NEXT: ret
%1 = select i1 %a, i32 6, i32 38
ret i32 %1
}
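For the 6/38 case just above, a fully branch-free materialisation also exists because 6 xor 38 is a power of two; a sketch at the IR level (illustrative only; the test checks what the backend actually emits, which with Zbt was two `li` plus one `cmov`):
define signext i32 @sketch_select_const(i1 zeroext %a) nounwind {
  %m = sext i1 %a to i32    ; all ones when %a is true, else zero
  %d = and i32 %m, 32       ; 6 xor 38 == 32
  %r = xor i32 %d, 38       ; true -> 6, false -> 38
  ret i32 %r
}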
@ -163,27 +107,6 @@ define float @select_const_fp(i1 zeroext %a) nounwind {
; RV32IF-NEXT: fmv.x.w a0, ft0
; RV32IF-NEXT: ret
;
; RV32IBT-LABEL: select_const_fp:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: lui a1, 264192
; RV32IBT-NEXT: lui a2, 263168
; RV32IBT-NEXT: cmov a0, a0, a2, a1
; RV32IBT-NEXT: ret
;
; RV32IFBT-LABEL: select_const_fp:
; RV32IFBT: # %bb.0:
; RV32IFBT-NEXT: bnez a0, .LBB4_2
; RV32IFBT-NEXT: # %bb.1:
; RV32IFBT-NEXT: lui a0, %hi(.LCPI4_0)
; RV32IFBT-NEXT: flw ft0, %lo(.LCPI4_0)(a0)
; RV32IFBT-NEXT: fmv.x.w a0, ft0
; RV32IFBT-NEXT: ret
; RV32IFBT-NEXT: .LBB4_2:
; RV32IFBT-NEXT: lui a0, %hi(.LCPI4_1)
; RV32IFBT-NEXT: flw ft0, %lo(.LCPI4_1)(a0)
; RV32IFBT-NEXT: fmv.x.w a0, ft0
; RV32IFBT-NEXT: ret
;
; RV64I-LABEL: select_const_fp:
; RV64I: # %bb.0:
; RV64I-NEXT: mv a1, a0
@ -207,27 +130,6 @@ define float @select_const_fp(i1 zeroext %a) nounwind {
; RV64IFD-NEXT: flw ft0, %lo(.LCPI4_1)(a0)
; RV64IFD-NEXT: fmv.x.w a0, ft0
; RV64IFD-NEXT: ret
;
; RV64IBT-LABEL: select_const_fp:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: lui a1, 264192
; RV64IBT-NEXT: lui a2, 263168
; RV64IBT-NEXT: cmov a0, a0, a2, a1
; RV64IBT-NEXT: ret
;
; RV64IFDBT-LABEL: select_const_fp:
; RV64IFDBT: # %bb.0:
; RV64IFDBT-NEXT: bnez a0, .LBB4_2
; RV64IFDBT-NEXT: # %bb.1:
; RV64IFDBT-NEXT: lui a0, %hi(.LCPI4_0)
; RV64IFDBT-NEXT: flw ft0, %lo(.LCPI4_0)(a0)
; RV64IFDBT-NEXT: fmv.x.w a0, ft0
; RV64IFDBT-NEXT: ret
; RV64IFDBT-NEXT: .LBB4_2:
; RV64IFDBT-NEXT: lui a0, %hi(.LCPI4_1)
; RV64IFDBT-NEXT: flw ft0, %lo(.LCPI4_1)(a0)
; RV64IFDBT-NEXT: fmv.x.w a0, ft0
; RV64IFDBT-NEXT: ret
%1 = select i1 %a, float 3.0, float 4.0
ret float %1
}


@ -1,12 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32I
; RUN: llc -mtriple=riscv32 -mattr=+d,+experimental-zbt -target-abi=ilp32 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV32IBT
; RUN: llc -mtriple=riscv64 -mattr=+d -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64I
; RUN: llc -mtriple=riscv64 -mattr=+d,+experimental-zbt -target-abi=lp64 -verify-machineinstrs < %s \
; RUN: | FileCheck %s -check-prefix=RV64IBT
; Selects of wide values are split into two selects, which can easily cause
; unnecessary control flow. Here we check some cases where we can currently
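Concretely: on RV32, type legalization splits one i64 select into two i32 selects on the same condition, so a naive lowering of the sketch below (hypothetical name) emits two branch diamonds, while Zbt emitted two `cmov`s, as the cmovcc64 checks below show.
define i64 @sketch_wide_select(i1 %c, i64 %x, i64 %y) nounwind {
  ; on RV32 this becomes: lo = c ? lo(x) : lo(y); hi = c ? hi(x) : hi(y)
  %r = select i1 %c, i64 %x, i64 %y
  ret i64 %r
}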
@ -25,13 +21,6 @@ define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmovcc64:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: addi a5, a0, -123
; RV32IBT-NEXT: cmov a0, a5, a3, a1
; RV32IBT-NEXT: cmov a1, a5, a4, a2
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmovcc64:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: li a3, 123
@ -41,12 +30,6 @@ define i64 @cmovcc64(i32 signext %a, i64 %b, i64 %c) nounwind {
; RV64I-NEXT: .LBB0_2: # %entry
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmovcc64:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: addi a0, a0, -123
; RV64IBT-NEXT: cmov a0, a0, a2, a1
; RV64IBT-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 123
%cond = select i1 %cmp, i64 %b, i64 %c
@ -93,30 +76,6 @@ define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV32I-NEXT: sw a2, 0(a0)
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmovcc128:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: addi a5, a3, 12
; RV32IBT-NEXT: addi a6, a4, 12
; RV32IBT-NEXT: addi a7, a3, 8
; RV32IBT-NEXT: addi t0, a4, 8
; RV32IBT-NEXT: addi t1, a3, 4
; RV32IBT-NEXT: addi t2, a4, 4
; RV32IBT-NEXT: xori a1, a1, 123
; RV32IBT-NEXT: or a1, a1, a2
; RV32IBT-NEXT: cmov a2, a1, a4, a3
; RV32IBT-NEXT: cmov a3, a1, t2, t1
; RV32IBT-NEXT: cmov a4, a1, t0, a7
; RV32IBT-NEXT: cmov a1, a1, a6, a5
; RV32IBT-NEXT: lw a1, 0(a1)
; RV32IBT-NEXT: lw a4, 0(a4)
; RV32IBT-NEXT: lw a3, 0(a3)
; RV32IBT-NEXT: lw a2, 0(a2)
; RV32IBT-NEXT: sw a1, 12(a0)
; RV32IBT-NEXT: sw a4, 8(a0)
; RV32IBT-NEXT: sw a3, 4(a0)
; RV32IBT-NEXT: sw a2, 0(a0)
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmovcc128:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: li a5, 123
@ -128,13 +87,6 @@ define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV64I-NEXT: mv a0, a1
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmovcc128:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: addi a5, a0, -123
; RV64IBT-NEXT: cmov a0, a5, a3, a1
; RV64IBT-NEXT: cmov a1, a5, a4, a2
; RV64IBT-NEXT: ret
entry:
%cmp = icmp eq i64 %a, 123
%cond = select i1 %cmp, i128 %b, i128 %c
@ -154,13 +106,6 @@ define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind {
; RV32I-NEXT: mv a1, a2
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmov64:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: andi a5, a0, 1
; RV32IBT-NEXT: cmov a0, a5, a1, a3
; RV32IBT-NEXT: cmov a1, a5, a2, a4
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmov64:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a3, a0, 1
@ -170,12 +115,6 @@ define i64 @cmov64(i1 %a, i64 %b, i64 %c) nounwind {
; RV64I-NEXT: mv a0, a2
; RV64I-NEXT: .LBB2_2: # %entry
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmov64:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: andi a0, a0, 1
; RV64IBT-NEXT: cmov a0, a0, a1, a2
; RV64IBT-NEXT: ret
entry:
%cond = select i1 %a, i64 %b, i64 %c
ret i64 %cond
@ -220,29 +159,6 @@ define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV32I-NEXT: sw a4, 0(a0)
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmov128:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: addi a4, a3, 12
; RV32IBT-NEXT: addi a5, a2, 12
; RV32IBT-NEXT: addi a6, a3, 8
; RV32IBT-NEXT: addi a7, a2, 8
; RV32IBT-NEXT: addi t0, a3, 4
; RV32IBT-NEXT: addi t1, a2, 4
; RV32IBT-NEXT: andi a1, a1, 1
; RV32IBT-NEXT: cmov a2, a1, a2, a3
; RV32IBT-NEXT: cmov a3, a1, t1, t0
; RV32IBT-NEXT: cmov a6, a1, a7, a6
; RV32IBT-NEXT: cmov a1, a1, a5, a4
; RV32IBT-NEXT: lw a1, 0(a1)
; RV32IBT-NEXT: lw a4, 0(a6)
; RV32IBT-NEXT: lw a3, 0(a3)
; RV32IBT-NEXT: lw a2, 0(a2)
; RV32IBT-NEXT: sw a1, 12(a0)
; RV32IBT-NEXT: sw a4, 8(a0)
; RV32IBT-NEXT: sw a3, 4(a0)
; RV32IBT-NEXT: sw a2, 0(a0)
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmov128:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a5, a0, 1
@ -254,13 +170,6 @@ define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV64I-NEXT: .LBB3_2: # %entry
; RV64I-NEXT: mv a1, a2
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmov128:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: andi a5, a0, 1
; RV64IBT-NEXT: cmov a0, a5, a1, a3
; RV64IBT-NEXT: cmov a1, a5, a2, a4
; RV64IBT-NEXT: ret
entry:
%cond = select i1 %a, i128 %b, i128 %c
ret i128 %cond
@ -283,22 +192,6 @@ define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind
; RV32I-NEXT: fmv.x.w a0, ft0
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmovfloat:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: andi a0, a0, 1
; RV32IBT-NEXT: bnez a0, .LBB4_2
; RV32IBT-NEXT: # %bb.1: # %entry
; RV32IBT-NEXT: fmv.w.x ft0, a4
; RV32IBT-NEXT: fmv.w.x ft1, a2
; RV32IBT-NEXT: j .LBB4_3
; RV32IBT-NEXT: .LBB4_2:
; RV32IBT-NEXT: fmv.w.x ft0, a3
; RV32IBT-NEXT: fmv.w.x ft1, a1
; RV32IBT-NEXT: .LBB4_3: # %entry
; RV32IBT-NEXT: fadd.s ft0, ft1, ft0
; RV32IBT-NEXT: fmv.x.w a0, ft0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmovfloat:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
@ -314,22 +207,6 @@ define float @cmovfloat(i1 %a, float %b, float %c, float %d, float %e) nounwind
; RV64I-NEXT: fadd.s ft0, ft1, ft0
; RV64I-NEXT: fmv.x.w a0, ft0
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmovfloat:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: andi a0, a0, 1
; RV64IBT-NEXT: bnez a0, .LBB4_2
; RV64IBT-NEXT: # %bb.1: # %entry
; RV64IBT-NEXT: fmv.w.x ft0, a4
; RV64IBT-NEXT: fmv.w.x ft1, a2
; RV64IBT-NEXT: j .LBB4_3
; RV64IBT-NEXT: .LBB4_2:
; RV64IBT-NEXT: fmv.w.x ft0, a3
; RV64IBT-NEXT: fmv.w.x ft1, a1
; RV64IBT-NEXT: .LBB4_3: # %entry
; RV64IBT-NEXT: fadd.s ft0, ft1, ft0
; RV64IBT-NEXT: fmv.x.w a0, ft0
; RV64IBT-NEXT: ret
entry:
%cond1 = select i1 %a, float %b, float %c
%cond2 = select i1 %a, float %d, float %e
@ -357,25 +234,6 @@ define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmovdouble:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: addi sp, sp, -16
; RV32IBT-NEXT: sw a3, 8(sp)
; RV32IBT-NEXT: sw a4, 12(sp)
; RV32IBT-NEXT: fld ft0, 8(sp)
; RV32IBT-NEXT: sw a1, 8(sp)
; RV32IBT-NEXT: andi a0, a0, 1
; RV32IBT-NEXT: sw a2, 12(sp)
; RV32IBT-NEXT: beqz a0, .LBB5_2
; RV32IBT-NEXT: # %bb.1:
; RV32IBT-NEXT: fld ft0, 8(sp)
; RV32IBT-NEXT: .LBB5_2: # %entry
; RV32IBT-NEXT: fsd ft0, 8(sp)
; RV32IBT-NEXT: lw a0, 8(sp)
; RV32IBT-NEXT: lw a1, 12(sp)
; RV32IBT-NEXT: addi sp, sp, 16
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmovdouble:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a0, a0, 1
@ -388,19 +246,6 @@ define double @cmovdouble(i1 %a, double %b, double %c) nounwind {
; RV64I-NEXT: fmv.d.x ft0, a1
; RV64I-NEXT: fmv.x.d a0, ft0
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmovdouble:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: andi a0, a0, 1
; RV64IBT-NEXT: bnez a0, .LBB5_2
; RV64IBT-NEXT: # %bb.1: # %entry
; RV64IBT-NEXT: fmv.d.x ft0, a2
; RV64IBT-NEXT: fmv.x.d a0, ft0
; RV64IBT-NEXT: ret
; RV64IBT-NEXT: .LBB5_2:
; RV64IBT-NEXT: fmv.d.x ft0, a1
; RV64IBT-NEXT: fmv.x.d a0, ft0
; RV64IBT-NEXT: ret
entry:
%cond = select i1 %a, double %b, double %c
ret double %cond
@ -429,14 +274,6 @@ define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV32I-NEXT: add a0, a1, a2
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmovccdep:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: addi a0, a0, -123
; RV32IBT-NEXT: cmov a1, a0, a2, a1
; RV32IBT-NEXT: cmov a0, a0, a3, a1
; RV32IBT-NEXT: add a0, a1, a0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmovccdep:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: li a4, 123
@ -455,14 +292,6 @@ define i32 @cmovccdep(i32 signext %a, i32 %b, i32 %c, i32 %d) nounwind {
; RV64I-NEXT: mv a2, a3
; RV64I-NEXT: addw a0, a1, a2
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmovccdep:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: addi a0, a0, -123
; RV64IBT-NEXT: cmov a1, a0, a2, a1
; RV64IBT-NEXT: cmov a0, a0, a3, a1
; RV64IBT-NEXT: addw a0, a1, a0
; RV64IBT-NEXT: ret
entry:
%cmp = icmp eq i32 %a, 123
%cond1 = select i1 %cmp, i32 %b, i32 %c
@ -492,15 +321,6 @@ define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
; RV32I-NEXT: add a0, a2, a4
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: cmovdiffcc:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: andi a1, a1, 1
; RV32IBT-NEXT: andi a0, a0, 1
; RV32IBT-NEXT: cmov a0, a0, a2, a3
; RV32IBT-NEXT: cmov a1, a1, a4, a5
; RV32IBT-NEXT: add a0, a0, a1
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: cmovdiffcc:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: andi a6, a0, 1
@ -518,15 +338,6 @@ define i32 @cmovdiffcc(i1 %a, i1 %b, i32 %c, i32 %d, i32 %e, i32 %f) nounwind {
; RV64I-NEXT: mv a4, a5
; RV64I-NEXT: addw a0, a2, a4
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: cmovdiffcc:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: andi a1, a1, 1
; RV64IBT-NEXT: andi a0, a0, 1
; RV64IBT-NEXT: cmov a0, a0, a2, a3
; RV64IBT-NEXT: cmov a1, a1, a4, a5
; RV64IBT-NEXT: addw a0, a0, a1
; RV64IBT-NEXT: ret
entry:
%cond1 = select i1 %a, i32 %c, i32 %d
%cond2 = select i1 %b, i32 %e, i32 %f
@ -552,23 +363,6 @@ define float @CascadedSelect(float noundef %a) {
; RV32I-NEXT: fmv.x.w a0, ft0
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: CascadedSelect:
; RV32IBT: # %bb.0: # %entry
; RV32IBT-NEXT: lui a1, %hi(.LCPI8_0)
; RV32IBT-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
; RV32IBT-NEXT: fmv.w.x ft1, a0
; RV32IBT-NEXT: flt.s a0, ft0, ft1
; RV32IBT-NEXT: bnez a0, .LBB8_3
; RV32IBT-NEXT: # %bb.1: # %entry
; RV32IBT-NEXT: fmv.w.x ft0, zero
; RV32IBT-NEXT: flt.s a0, ft1, ft0
; RV32IBT-NEXT: bnez a0, .LBB8_3
; RV32IBT-NEXT: # %bb.2: # %entry
; RV32IBT-NEXT: fmv.s ft0, ft1
; RV32IBT-NEXT: .LBB8_3: # %entry
; RV32IBT-NEXT: fmv.x.w a0, ft0
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: CascadedSelect:
; RV64I: # %bb.0: # %entry
; RV64I-NEXT: lui a1, %hi(.LCPI8_0)
@ -585,23 +379,6 @@ define float @CascadedSelect(float noundef %a) {
; RV64I-NEXT: .LBB8_3: # %entry
; RV64I-NEXT: fmv.x.w a0, ft0
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: CascadedSelect:
; RV64IBT: # %bb.0: # %entry
; RV64IBT-NEXT: lui a1, %hi(.LCPI8_0)
; RV64IBT-NEXT: flw ft0, %lo(.LCPI8_0)(a1)
; RV64IBT-NEXT: fmv.w.x ft1, a0
; RV64IBT-NEXT: flt.s a0, ft0, ft1
; RV64IBT-NEXT: bnez a0, .LBB8_3
; RV64IBT-NEXT: # %bb.1: # %entry
; RV64IBT-NEXT: fmv.w.x ft0, zero
; RV64IBT-NEXT: flt.s a0, ft1, ft0
; RV64IBT-NEXT: bnez a0, .LBB8_3
; RV64IBT-NEXT: # %bb.2: # %entry
; RV64IBT-NEXT: fmv.s ft0, ft1
; RV64IBT-NEXT: .LBB8_3: # %entry
; RV64IBT-NEXT: fmv.x.w a0, ft0
; RV64IBT-NEXT: ret
entry:
%cmp = fcmp ogt float %a, 1.000000e+00
%cmp1 = fcmp olt float %a, 0.000000e+00


@ -1,12 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=riscv32 -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN: | FileCheck -check-prefix=RV32I %s
# RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN: | FileCheck -check-prefix=RV32IBT %s
# RUN: llc -mtriple=riscv64 -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN: | FileCheck -check-prefix=RV64I %s
# RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -run-pass=finalize-isel -simplify-mir -o - %s \
# RUN: | FileCheck -check-prefix=RV64IBT %s
# Provide dummy definitions of functions and just enough metadata to create a
# DBG_VALUE.
@ -74,34 +70,6 @@ body: |
; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV32I-NEXT: $x10 = COPY [[ADD]]
; RV32I-NEXT: PseudoRET implicit $x10
; RV32IBT-LABEL: name: cmov_interleaved_bad
; RV32IBT: successors: %bb.1, %bb.2
; RV32IBT-NEXT: liveins: $x10, $x11, $x12, $x13
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV32IBT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV32IBT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV32IBT-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV32IBT-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV32IBT-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV32IBT-NEXT: BNE [[ANDI]], [[COPY4]], %bb.2
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: .1:
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: .2:
; RV32IBT-NEXT: successors: %bb.3, %bb.4
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV32IBT-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
; RV32IBT-NEXT: BNE [[ANDI]], [[COPY4]], %bb.4
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: .3:
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: .4:
; RV32IBT-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
; RV32IBT-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV32IBT-NEXT: $x10 = COPY [[ADD]]
; RV32IBT-NEXT: PseudoRET implicit $x10
; RV64I-LABEL: name: cmov_interleaved_bad
; RV64I: successors: %bb.1, %bb.2
; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13
@ -130,34 +98,6 @@ body: |
; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV64I-NEXT: $x10 = COPY [[ADD]]
; RV64I-NEXT: PseudoRET implicit $x10
; RV64IBT-LABEL: name: cmov_interleaved_bad
; RV64IBT: successors: %bb.1, %bb.2
; RV64IBT-NEXT: liveins: $x10, $x11, $x12, $x13
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV64IBT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV64IBT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV64IBT-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV64IBT-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV64IBT-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV64IBT-NEXT: BNE [[ANDI]], [[COPY4]], %bb.2
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: .1:
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: .2:
; RV64IBT-NEXT: successors: %bb.3, %bb.4
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV64IBT-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
; RV64IBT-NEXT: BNE [[ANDI]], [[COPY4]], %bb.4
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: .3:
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: .4:
; RV64IBT-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.2, [[COPY1]], %bb.3
; RV64IBT-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV64IBT-NEXT: $x10 = COPY [[ADD]]
; RV64IBT-NEXT: PseudoRET implicit $x10
%3:gpr = COPY $x13
%2:gpr = COPY $x12
%1:gpr = COPY $x11
@ -224,30 +164,6 @@ body: |
; RV32I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV32I-NEXT: $x10 = COPY [[ADD]]
; RV32I-NEXT: PseudoRET implicit $x10
; RV32IBT-LABEL: name: cmov_interleaved_debug_value
; RV32IBT: successors: %bb.1, %bb.2
; RV32IBT-NEXT: liveins: $x10, $x11, $x12, $x13
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV32IBT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV32IBT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV32IBT-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV32IBT-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV32IBT-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV32IBT-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
; RV32IBT-NEXT: DBG_VALUE [[ADDI]], $noreg
; RV32IBT-NEXT: BNE [[ANDI]], [[COPY4]], %bb.2
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: .1:
; RV32IBT-NEXT: {{ $}}
; RV32IBT-NEXT: .2:
; RV32IBT-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV32IBT-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; RV32IBT-NEXT: DBG_VALUE [[PHI]], $noreg
; RV32IBT-NEXT: DBG_VALUE [[PHI1]], $noreg
; RV32IBT-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV32IBT-NEXT: $x10 = COPY [[ADD]]
; RV32IBT-NEXT: PseudoRET implicit $x10
; RV64I-LABEL: name: cmov_interleaved_debug_value
; RV64I: successors: %bb.1, %bb.2
; RV64I-NEXT: liveins: $x10, $x11, $x12, $x13
@ -272,30 +188,6 @@ body: |
; RV64I-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV64I-NEXT: $x10 = COPY [[ADD]]
; RV64I-NEXT: PseudoRET implicit $x10
; RV64IBT-LABEL: name: cmov_interleaved_debug_value
; RV64IBT: successors: %bb.1, %bb.2
; RV64IBT-NEXT: liveins: $x10, $x11, $x12, $x13
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x13
; RV64IBT-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x12
; RV64IBT-NEXT: [[COPY2:%[0-9]+]]:gpr = COPY $x11
; RV64IBT-NEXT: [[COPY3:%[0-9]+]]:gpr = COPY $x10
; RV64IBT-NEXT: [[ANDI:%[0-9]+]]:gpr = ANDI [[COPY3]], 1
; RV64IBT-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
; RV64IBT-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[COPY3]], 1
; RV64IBT-NEXT: DBG_VALUE [[ADDI]], $noreg
; RV64IBT-NEXT: BNE [[ANDI]], [[COPY4]], %bb.2
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: .1:
; RV64IBT-NEXT: {{ $}}
; RV64IBT-NEXT: .2:
; RV64IBT-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY2]], %bb.0, [[COPY1]], %bb.1
; RV64IBT-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[COPY]], %bb.0, [[COPY1]], %bb.1
; RV64IBT-NEXT: DBG_VALUE [[PHI]], $noreg
; RV64IBT-NEXT: DBG_VALUE [[PHI1]], $noreg
; RV64IBT-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[PHI]], killed [[PHI1]]
; RV64IBT-NEXT: $x10 = COPY [[ADD]]
; RV64IBT-NEXT: PseudoRET implicit $x10
%3:gpr = COPY $x13
%2:gpr = COPY $x12
%1:gpr = COPY $x11


@ -1,12 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32I %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV32IBT %s
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64I %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN: | FileCheck -check-prefix=RV64IBT %s
;; There are a few different ways to lower (select (or A, B), X, Y). This test
;; ensures that we do so with as few branches as possible.
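As with the `and` variant earlier, the pattern is an i1 `or` feeding a select; a minimal sketch (hypothetical name, not from the test file):
define i32 @sketch_select_of_or(i1 zeroext %a, i1 zeroext %b, i32 %x, i32 %y) nounwind {
  %cond = or i1 %a, %b                    ; one combined condition
  %res = select i1 %cond, i32 %x, i32 %y  ; one select, so at most one branch
  ret i32 %res
}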
@ -22,12 +18,6 @@ define signext i32 @select_of_or(i1 zeroext %a, i1 zeroext %b, i32 signext %c, i
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: select_of_or:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: or a0, a0, a1
; RV32IBT-NEXT: cmov a0, a0, a2, a3
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: select_of_or:
; RV64I: # %bb.0:
; RV64I-NEXT: or a1, a0, a1
@ -37,12 +27,6 @@ define signext i32 @select_of_or(i1 zeroext %a, i1 zeroext %b, i32 signext %c, i
; RV64I-NEXT: mv a0, a3
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: select_of_or:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: or a0, a0, a1
; RV64IBT-NEXT: cmov a0, a0, a2, a3
; RV64IBT-NEXT: ret
%1 = or i1 %a, %b
%2 = select i1 %1, i32 %c, i32 %d
ret i32 %2
@ -68,22 +52,6 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32IBT-LABEL: if_of_or:
; RV32IBT: # %bb.0:
; RV32IBT-NEXT: addi sp, sp, -16
; RV32IBT-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IBT-NEXT: or a0, a0, a1
; RV32IBT-NEXT: beqz a0, .LBB1_2
; RV32IBT-NEXT: # %bb.1: # %if.then
; RV32IBT-NEXT: call either@plt
; RV32IBT-NEXT: j .LBB1_3
; RV32IBT-NEXT: .LBB1_2: # %if.else
; RV32IBT-NEXT: call neither@plt
; RV32IBT-NEXT: .LBB1_3: # %if.end
; RV32IBT-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32IBT-NEXT: addi sp, sp, 16
; RV32IBT-NEXT: ret
;
; RV64I-LABEL: if_of_or:
; RV64I: # %bb.0:
; RV64I-NEXT: addi sp, sp, -16
@ -99,22 +67,6 @@ define signext i32 @if_of_or(i1 zeroext %a, i1 zeroext %b) nounwind {
; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64IBT-LABEL: if_of_or:
; RV64IBT: # %bb.0:
; RV64IBT-NEXT: addi sp, sp, -16
; RV64IBT-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IBT-NEXT: or a0, a0, a1
; RV64IBT-NEXT: beqz a0, .LBB1_2
; RV64IBT-NEXT: # %bb.1: # %if.then
; RV64IBT-NEXT: call either@plt
; RV64IBT-NEXT: j .LBB1_3
; RV64IBT-NEXT: .LBB1_2: # %if.else
; RV64IBT-NEXT: call neither@plt
; RV64IBT-NEXT: .LBB1_3: # %if.end
; RV64IBT-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IBT-NEXT: addi sp, sp, 16
; RV64IBT-NEXT: ret
%1 = or i1 %a, %b
br i1 %1, label %if.then, label %if.else
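The hunk context cuts the function off at the branch; below is a hedged reconstruction of the whole test function, for illustration only (the if.then/if.else bodies follow from the call either@plt/neither@plt lines in the removed checks; the exact return value is an assumption):

declare void @either()
declare void @neither()

define signext i32 @if_of_or_sketch(i1 zeroext %a, i1 zeroext %b) nounwind {
  %1 = or i1 %a, %b
  br i1 %1, label %if.then, label %if.else

if.then:                                 ; calls either(), then joins if.end
  call void @either()
  br label %if.end

if.else:                                 ; calls neither(), then joins if.end
  call void @neither()
  br label %if.end

if.end:
  ret i32 0                              ; assumed return value, not from the test
}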


@ -1,10 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32NoZbt,RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64NoZbt,RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32NoZbt,RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64NoZbt,RV64IZbb
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV32IZbb,RV32IZbbZbt
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV64IZbb,RV64IZbbZbt
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32,RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64,RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb
declare i4 @llvm.ssub.sat.i4(i4, i4)
declare i8 @llvm.ssub.sat.i8(i8, i8)
@ -13,19 +11,19 @@ declare i32 @llvm.ssub.sat.i32(i32, i32)
declare i64 @llvm.ssub.sat.i64(i64, i64)
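For context, a reminder of the intrinsic contract these functions exercise, as a self-contained sketch (illustrative, not part of the diff): llvm.ssub.sat performs signed subtraction that clamps to the type's limits instead of wrapping on overflow.

declare i32 @llvm.ssub.sat.i32(i32, i32)

; The result saturates to [INT32_MIN, INT32_MAX]; for example,
; ssub.sat(INT32_MIN, 1) stays INT32_MIN rather than wrapping around.
define i32 @ssub_sat_sketch(i32 %x, i32 %y) {
  %r = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y)
  ret i32 %r
}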
define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV32NoZbt-LABEL: func:
; RV32NoZbt: # %bb.0:
; RV32NoZbt-NEXT: mv a2, a0
; RV32NoZbt-NEXT: sgtz a3, a1
; RV32NoZbt-NEXT: sub a0, a0, a1
; RV32NoZbt-NEXT: slt a1, a0, a2
; RV32NoZbt-NEXT: beq a3, a1, .LBB0_2
; RV32NoZbt-NEXT: # %bb.1:
; RV32NoZbt-NEXT: srai a0, a0, 31
; RV32NoZbt-NEXT: lui a1, 524288
; RV32NoZbt-NEXT: xor a0, a0, a1
; RV32NoZbt-NEXT: .LBB0_2:
; RV32NoZbt-NEXT: ret
; RV32-LABEL: func:
; RV32: # %bb.0:
; RV32-NEXT: mv a2, a0
; RV32-NEXT: sgtz a3, a1
; RV32-NEXT: sub a0, a0, a1
; RV32-NEXT: slt a1, a0, a2
; RV32-NEXT: beq a3, a1, .LBB0_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: srai a0, a0, 31
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: xor a0, a0, a1
; RV32-NEXT: .LBB0_2:
; RV32-NEXT: ret
;
; RV64I-LABEL: func:
; RV64I: # %bb.0:
@ -52,86 +50,44 @@ define signext i32 @func(i32 signext %x, i32 signext %y) nounwind {
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
;
; RV32IZbbZbt-LABEL: func:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: sgtz a2, a1
; RV32IZbbZbt-NEXT: sub a1, a0, a1
; RV32IZbbZbt-NEXT: slt a0, a1, a0
; RV32IZbbZbt-NEXT: xor a0, a2, a0
; RV32IZbbZbt-NEXT: srai a2, a1, 31
; RV32IZbbZbt-NEXT: lui a3, 524288
; RV32IZbbZbt-NEXT: xor a2, a2, a3
; RV32IZbbZbt-NEXT: cmov a0, a0, a2, a1
; RV32IZbbZbt-NEXT: ret
%tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %y);
ret i32 %tmp;
}
define i64 @func2(i64 %x, i64 %y) nounwind {
; RV32NoZbt-LABEL: func2:
; RV32NoZbt: # %bb.0:
; RV32NoZbt-NEXT: mv a4, a1
; RV32NoZbt-NEXT: sltu a1, a0, a2
; RV32NoZbt-NEXT: sub a5, a4, a3
; RV32NoZbt-NEXT: sub a1, a5, a1
; RV32NoZbt-NEXT: xor a5, a4, a1
; RV32NoZbt-NEXT: xor a3, a4, a3
; RV32NoZbt-NEXT: and a3, a3, a5
; RV32NoZbt-NEXT: bltz a3, .LBB1_2
; RV32NoZbt-NEXT: # %bb.1:
; RV32NoZbt-NEXT: sub a0, a0, a2
; RV32NoZbt-NEXT: ret
; RV32NoZbt-NEXT: .LBB1_2:
; RV32NoZbt-NEXT: srai a0, a1, 31
; RV32NoZbt-NEXT: lui a1, 524288
; RV32NoZbt-NEXT: xor a1, a0, a1
; RV32NoZbt-NEXT: ret
; RV32-LABEL: func2:
; RV32: # %bb.0:
; RV32-NEXT: mv a4, a1
; RV32-NEXT: sltu a1, a0, a2
; RV32-NEXT: sub a5, a4, a3
; RV32-NEXT: sub a1, a5, a1
; RV32-NEXT: xor a5, a4, a1
; RV32-NEXT: xor a3, a4, a3
; RV32-NEXT: and a3, a3, a5
; RV32-NEXT: bltz a3, .LBB1_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: sub a0, a0, a2
; RV32-NEXT: ret
; RV32-NEXT: .LBB1_2:
; RV32-NEXT: srai a0, a1, 31
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: xor a1, a0, a1
; RV32-NEXT: ret
;
; RV64NoZbt-LABEL: func2:
; RV64NoZbt: # %bb.0:
; RV64NoZbt-NEXT: mv a2, a0
; RV64NoZbt-NEXT: sgtz a3, a1
; RV64NoZbt-NEXT: sub a0, a0, a1
; RV64NoZbt-NEXT: slt a1, a0, a2
; RV64NoZbt-NEXT: beq a3, a1, .LBB1_2
; RV64NoZbt-NEXT: # %bb.1:
; RV64NoZbt-NEXT: srai a0, a0, 63
; RV64NoZbt-NEXT: li a1, -1
; RV64NoZbt-NEXT: slli a1, a1, 63
; RV64NoZbt-NEXT: xor a0, a0, a1
; RV64NoZbt-NEXT: .LBB1_2:
; RV64NoZbt-NEXT: ret
;
; RV32IZbbZbt-LABEL: func2:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: sltu a4, a0, a2
; RV32IZbbZbt-NEXT: sub a5, a1, a3
; RV32IZbbZbt-NEXT: sub a4, a5, a4
; RV32IZbbZbt-NEXT: srai a5, a4, 31
; RV32IZbbZbt-NEXT: lui a6, 524288
; RV32IZbbZbt-NEXT: xor a6, a5, a6
; RV32IZbbZbt-NEXT: xor a7, a1, a4
; RV32IZbbZbt-NEXT: xor a1, a1, a3
; RV32IZbbZbt-NEXT: and a1, a1, a7
; RV32IZbbZbt-NEXT: slti a3, a1, 0
; RV32IZbbZbt-NEXT: cmov a1, a3, a6, a4
; RV32IZbbZbt-NEXT: sub a0, a0, a2
; RV32IZbbZbt-NEXT: cmov a0, a3, a5, a0
; RV32IZbbZbt-NEXT: ret
;
; RV64IZbbZbt-LABEL: func2:
; RV64IZbbZbt: # %bb.0:
; RV64IZbbZbt-NEXT: sgtz a2, a1
; RV64IZbbZbt-NEXT: sub a1, a0, a1
; RV64IZbbZbt-NEXT: slt a0, a1, a0
; RV64IZbbZbt-NEXT: xor a0, a2, a0
; RV64IZbbZbt-NEXT: srai a2, a1, 63
; RV64IZbbZbt-NEXT: li a3, -1
; RV64IZbbZbt-NEXT: slli a3, a3, 63
; RV64IZbbZbt-NEXT: xor a2, a2, a3
; RV64IZbbZbt-NEXT: cmov a0, a0, a2, a1
; RV64IZbbZbt-NEXT: ret
; RV64-LABEL: func2:
; RV64: # %bb.0:
; RV64-NEXT: mv a2, a0
; RV64-NEXT: sgtz a3, a1
; RV64-NEXT: sub a0, a0, a1
; RV64-NEXT: slt a1, a0, a2
; RV64-NEXT: beq a3, a1, .LBB1_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
; RV64-NEXT: xor a0, a0, a1
; RV64-NEXT: .LBB1_2:
; RV64-NEXT: ret
%tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %y);
ret i64 %tmp;
}


@ -1,10 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32NoZbt,RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64NoZbt,RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32NoZbt,RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64NoZbt,RV64IZbb
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV32IZbb,RV32IZbbZbt
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb,+experimental-zbt | FileCheck %s --check-prefixes=RV64IZbb,RV64IZbbZbt
; RUN: llc < %s -mtriple=riscv32 -mattr=+m | FileCheck %s --check-prefixes=RV32,RV32I
; RUN: llc < %s -mtriple=riscv64 -mattr=+m | FileCheck %s --check-prefixes=RV64,RV64I
; RUN: llc < %s -mtriple=riscv32 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV32,RV32IZbb
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+zbb | FileCheck %s --check-prefixes=RV64,RV64IZbb
declare i4 @llvm.ssub.sat.i4(i4, i4)
declare i8 @llvm.ssub.sat.i8(i8, i8)
@ -13,20 +11,20 @@ declare i32 @llvm.ssub.sat.i32(i32, i32)
declare i64 @llvm.ssub.sat.i64(i64, i64)
define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV32NoZbt-LABEL: func32:
; RV32NoZbt: # %bb.0:
; RV32NoZbt-NEXT: mv a3, a0
; RV32NoZbt-NEXT: mul a0, a1, a2
; RV32NoZbt-NEXT: sgtz a1, a0
; RV32NoZbt-NEXT: sub a0, a3, a0
; RV32NoZbt-NEXT: slt a2, a0, a3
; RV32NoZbt-NEXT: beq a1, a2, .LBB0_2
; RV32NoZbt-NEXT: # %bb.1:
; RV32NoZbt-NEXT: srai a0, a0, 31
; RV32NoZbt-NEXT: lui a1, 524288
; RV32NoZbt-NEXT: xor a0, a0, a1
; RV32NoZbt-NEXT: .LBB0_2:
; RV32NoZbt-NEXT: ret
; RV32-LABEL: func32:
; RV32: # %bb.0:
; RV32-NEXT: mv a3, a0
; RV32-NEXT: mul a0, a1, a2
; RV32-NEXT: sgtz a1, a0
; RV32-NEXT: sub a0, a3, a0
; RV32-NEXT: slt a2, a0, a3
; RV32-NEXT: beq a1, a2, .LBB0_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: srai a0, a0, 31
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: xor a0, a0, a1
; RV32-NEXT: .LBB0_2:
; RV32-NEXT: ret
;
; RV64I-LABEL: func32:
; RV64I: # %bb.0:
@ -57,88 +55,45 @@ define i32 @func32(i32 %x, i32 %y, i32 %z) nounwind {
; RV64IZbb-NEXT: min a0, a0, a2
; RV64IZbb-NEXT: max a0, a0, a1
; RV64IZbb-NEXT: ret
;
; RV32IZbbZbt-LABEL: func32:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: mul a1, a1, a2
; RV32IZbbZbt-NEXT: sgtz a2, a1
; RV32IZbbZbt-NEXT: sub a1, a0, a1
; RV32IZbbZbt-NEXT: slt a0, a1, a0
; RV32IZbbZbt-NEXT: xor a0, a2, a0
; RV32IZbbZbt-NEXT: srai a2, a1, 31
; RV32IZbbZbt-NEXT: lui a3, 524288
; RV32IZbbZbt-NEXT: xor a2, a2, a3
; RV32IZbbZbt-NEXT: cmov a0, a0, a2, a1
; RV32IZbbZbt-NEXT: ret
%a = mul i32 %y, %z
%tmp = call i32 @llvm.ssub.sat.i32(i32 %x, i32 %a)
ret i32 %tmp
}
define i64 @func64(i64 %x, i64 %y, i64 %z) nounwind {
; RV32NoZbt-LABEL: func64:
; RV32NoZbt: # %bb.0:
; RV32NoZbt-NEXT: mv a2, a1
; RV32NoZbt-NEXT: sltu a1, a0, a4
; RV32NoZbt-NEXT: sub a3, a2, a5
; RV32NoZbt-NEXT: sub a1, a3, a1
; RV32NoZbt-NEXT: xor a3, a2, a1
; RV32NoZbt-NEXT: xor a2, a2, a5
; RV32NoZbt-NEXT: and a2, a2, a3
; RV32NoZbt-NEXT: bltz a2, .LBB1_2
; RV32NoZbt-NEXT: # %bb.1:
; RV32NoZbt-NEXT: sub a0, a0, a4
; RV32NoZbt-NEXT: ret
; RV32NoZbt-NEXT: .LBB1_2:
; RV32NoZbt-NEXT: srai a0, a1, 31
; RV32NoZbt-NEXT: lui a1, 524288
; RV32NoZbt-NEXT: xor a1, a0, a1
; RV32NoZbt-NEXT: ret
; RV32-LABEL: func64:
; RV32: # %bb.0:
; RV32-NEXT: mv a2, a1
; RV32-NEXT: sltu a1, a0, a4
; RV32-NEXT: sub a3, a2, a5
; RV32-NEXT: sub a1, a3, a1
; RV32-NEXT: xor a3, a2, a1
; RV32-NEXT: xor a2, a2, a5
; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: bltz a2, .LBB1_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: sub a0, a0, a4
; RV32-NEXT: ret
; RV32-NEXT: .LBB1_2:
; RV32-NEXT: srai a0, a1, 31
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: xor a1, a0, a1
; RV32-NEXT: ret
;
; RV64NoZbt-LABEL: func64:
; RV64NoZbt: # %bb.0:
; RV64NoZbt-NEXT: mv a1, a0
; RV64NoZbt-NEXT: sgtz a3, a2
; RV64NoZbt-NEXT: sub a0, a0, a2
; RV64NoZbt-NEXT: slt a1, a0, a1
; RV64NoZbt-NEXT: beq a3, a1, .LBB1_2
; RV64NoZbt-NEXT: # %bb.1:
; RV64NoZbt-NEXT: srai a0, a0, 63
; RV64NoZbt-NEXT: li a1, -1
; RV64NoZbt-NEXT: slli a1, a1, 63
; RV64NoZbt-NEXT: xor a0, a0, a1
; RV64NoZbt-NEXT: .LBB1_2:
; RV64NoZbt-NEXT: ret
;
; RV32IZbbZbt-LABEL: func64:
; RV32IZbbZbt: # %bb.0:
; RV32IZbbZbt-NEXT: sltu a2, a0, a4
; RV32IZbbZbt-NEXT: sub a3, a1, a5
; RV32IZbbZbt-NEXT: sub a2, a3, a2
; RV32IZbbZbt-NEXT: srai a3, a2, 31
; RV32IZbbZbt-NEXT: lui a6, 524288
; RV32IZbbZbt-NEXT: xor a6, a3, a6
; RV32IZbbZbt-NEXT: xor a7, a1, a2
; RV32IZbbZbt-NEXT: xor a1, a1, a5
; RV32IZbbZbt-NEXT: and a1, a1, a7
; RV32IZbbZbt-NEXT: slti a5, a1, 0
; RV32IZbbZbt-NEXT: cmov a1, a5, a6, a2
; RV32IZbbZbt-NEXT: sub a0, a0, a4
; RV32IZbbZbt-NEXT: cmov a0, a5, a3, a0
; RV32IZbbZbt-NEXT: ret
;
; RV64IZbbZbt-LABEL: func64:
; RV64IZbbZbt: # %bb.0:
; RV64IZbbZbt-NEXT: sgtz a1, a2
; RV64IZbbZbt-NEXT: sub a2, a0, a2
; RV64IZbbZbt-NEXT: slt a0, a2, a0
; RV64IZbbZbt-NEXT: xor a0, a1, a0
; RV64IZbbZbt-NEXT: srai a1, a2, 63
; RV64IZbbZbt-NEXT: li a3, -1
; RV64IZbbZbt-NEXT: slli a3, a3, 63
; RV64IZbbZbt-NEXT: xor a1, a1, a3
; RV64IZbbZbt-NEXT: cmov a0, a0, a1, a2
; RV64IZbbZbt-NEXT: ret
; RV64-LABEL: func64:
; RV64: # %bb.0:
; RV64-NEXT: mv a1, a0
; RV64-NEXT: sgtz a3, a2
; RV64-NEXT: sub a0, a0, a2
; RV64-NEXT: slt a1, a0, a1
; RV64-NEXT: beq a3, a1, .LBB1_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: srai a0, a0, 63
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
; RV64-NEXT: xor a0, a0, a1
; RV64-NEXT: .LBB1_2:
; RV64-NEXT: ret
%a = mul i64 %y, %z
%tmp = call i64 @llvm.ssub.sat.i64(i64 %x, i64 %z)
ret i64 %tmp


@ -1,18 +0,0 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zbt < %s 2>&1 | FileCheck %s
# Too few operands
cmix t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Too few operands
cmov t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Too few operands
fsl t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Too few operands
fsr t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Too few operands
fsri t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Immediate operand out of range
fsri t0, t1, t2, 32 # CHECK: :[[@LINE]]:18: error: immediate must be an integer in the range [0, 31]
fsri t0, t1, t2, -1 # CHECK: :[[@LINE]]:18: error: immediate must be an integer in the range [0, 31]
fslw t0, t1, t2, t3 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
fsrw t0, t1, t2, t3 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}
fsriw t0, t1, t2, 0 # CHECK: :[[@LINE]]:1: error: instruction requires the following: RV64I Base Instruction Set{{$}}


@ -1,22 +0,0 @@
# With Bitmanip ternary extension:
# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zbt -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zbt < %s \
# RUN: | llvm-objdump --mattr=+experimental-zbt -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# CHECK-ASM-AND-OBJ: cmix t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0x92,0x63,0xe6]
cmix t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: cmov t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0xd2,0x63,0xe6]
cmov t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsl t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0x12,0xc3,0x3d]
fsl t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsr t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0x52,0xc3,0x3d]
fsr t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsri t0, t1, t2, 0
# CHECK-ASM: encoding: [0x93,0x52,0x03,0x3c]
fsri t0, t1, t2, 0
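The fsl/fsr/fsri forms deleted here were the Zbt funnel shifts. Their closest generic counterparts are the llvm.fshl/llvm.fshr intrinsics, which the backend matched to these instructions while Zbt was supported; a minimal sketch assuming the standard llvm.fshl semantics (the Zbt instructions read the shift amount modulo 2*XLEN, a detail glossed over here):

; Funnel shift left: conceptually concatenate %a (high) and %b (low),
; shift left by %c modulo 32, and keep the upper 32 bits.
define i32 @fshl_sketch(i32 %a, i32 %b, i32 %c) {
  %r = call i32 @llvm.fshl.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %r
}
declare i32 @llvm.fshl.i32(i32, i32, i32)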


@ -1,9 +0,0 @@
# RUN: not llvm-mc -triple riscv64 -mattr=+experimental-zbt < %s 2>&1 | FileCheck %s
# Too few operands
fslw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Too few operands
fsrw t0, t1, t2 # CHECK: :[[@LINE]]:1: error: too few operands for instruction
# Immediate operand out of range
fsriw t0, t1, t2, 32 # CHECK: :[[@LINE]]:19: error: immediate must be an integer in the range [0, 31]
fsriw t0, t1, t2, -1 # CHECK: :[[@LINE]]:19: error: immediate must be an integer in the range [0, 31]


@ -1,31 +0,0 @@
# With Bitmanip ternary extension:
# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zbt -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zbt < %s \
# RUN: | llvm-objdump --mattr=+experimental-zbt -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# CHECK-ASM-AND-OBJ: cmix t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0x92,0x63,0xe6]
cmix t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: cmov t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0xd2,0x63,0xe6]
cmov t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsl t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0x12,0xc3,0x3d]
fsl t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsr t0, t1, t2, t3
# CHECK-ASM: encoding: [0xb3,0x52,0xc3,0x3d]
fsr t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsri t0, t1, t2, 0
# CHECK-ASM: encoding: [0x93,0x52,0x03,0x3c]
fsri t0, t1, t2, 0
# CHECK-ASM-AND-OBJ: fslw t0, t1, t2, t3
# CHECK-ASM: encoding: [0xbb,0x12,0xc3,0x3d]
fslw t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsrw t0, t1, t2, t3
# CHECK-ASM: encoding: [0xbb,0x52,0xc3,0x3d]
fsrw t0, t1, t2, t3
# CHECK-ASM-AND-OBJ: fsriw t0, t1, t2, 0
# CHECK-ASM: encoding: [0x9b,0x52,0x03,0x3c]
fsriw t0, t1, t2, 0