[RISCV] Add intrinsic for Zbt extension

RV32: fsl, fsr, fsri
RV64: fsl, fsr, fsri, fslw, fsrw, fsriw

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D117468
commit 0be3da1fab (parent d7bff2e9d2)
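For context, the Zbt fsl/fsr instructions are funnel shifts: they shift a double-width value assembled from two source registers and return a single XLEN-wide result, with fsri/fsriw as the immediate-shift forms. A minimal, illustrative C sketch of the new builtins in use (the helper name is hypothetical; the rotate reading relies only on the general funnel-shift property that equal data inputs degenerate into a rotate, and it assumes a clang with this patch and the experimental Zbt feature enabled):

#include <stdint.h>

// Hypothetical helper, not part of the patch: feeding the same value into both
// data operands of the funnel shift turns it into a plain rotate of x by amt.
static inline int32_t rotate32(int32_t x, int32_t amt) {
  return __builtin_riscv_fsl_32(x, x, amt);
}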
clang/include/clang/Basic/BuiltinsRISCV.def

@@ -61,5 +61,11 @@ TARGET_BUILTIN(__builtin_riscv_crc32c_w, "LiLi", "nc", "experimental-zbr")
 TARGET_BUILTIN(__builtin_riscv_crc32_d, "LiLi", "nc", "experimental-zbr,64bit")
 TARGET_BUILTIN(__builtin_riscv_crc32c_d, "LiLi", "nc", "experimental-zbr,64bit")
 
+// Zbt extension
+TARGET_BUILTIN(__builtin_riscv_fsl_32, "LiLiLiLi", "nc", "experimental-zbt")
+TARGET_BUILTIN(__builtin_riscv_fsr_32, "LiLiLiLi", "nc", "experimental-zbt")
+TARGET_BUILTIN(__builtin_riscv_fsl_64, "WiWiWiWi", "nc", "experimental-zbt,64bit")
+TARGET_BUILTIN(__builtin_riscv_fsr_64, "WiWiWiWi", "nc", "experimental-zbt,64bit")
+
 #undef BUILTIN
 #undef TARGET_BUILTIN
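The signature strings use Clang's builtin type encoding: "Li" is long int and "Wi" is int64_t, with the leading letters naming the return type, so the entries above correspond roughly to the C prototypes below (illustrative only; the real declarations are synthesized by Clang from the .def entries):

#include <stdint.h>

long    __builtin_riscv_fsl_32(long rs1, long rs2, long rs3);          /* Zbt, RV32 and RV64 */
long    __builtin_riscv_fsr_32(long rs1, long rs2, long rs3);
int64_t __builtin_riscv_fsl_64(int64_t rs1, int64_t rs2, int64_t rs3); /* Zbt, RV64 only */
int64_t __builtin_riscv_fsr_64(int64_t rs1, int64_t rs2, int64_t rs3);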
clang/lib/CodeGen/CGBuiltin.cpp

@@ -18853,7 +18853,11 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
   case RISCV::BI__builtin_riscv_crc32c_b:
   case RISCV::BI__builtin_riscv_crc32c_h:
   case RISCV::BI__builtin_riscv_crc32c_w:
-  case RISCV::BI__builtin_riscv_crc32c_d: {
+  case RISCV::BI__builtin_riscv_crc32c_d:
+  case RISCV::BI__builtin_riscv_fsl_32:
+  case RISCV::BI__builtin_riscv_fsr_32:
+  case RISCV::BI__builtin_riscv_fsl_64:
+  case RISCV::BI__builtin_riscv_fsr_64: {
     switch (BuiltinID) {
     default: llvm_unreachable("unexpected builtin ID");
     // Zbb
@@ -18944,6 +18948,16 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
     case RISCV::BI__builtin_riscv_crc32c_d:
       ID = Intrinsic::riscv_crc32c_d;
       break;
+
+    // Zbt
+    case RISCV::BI__builtin_riscv_fsl_32:
+    case RISCV::BI__builtin_riscv_fsl_64:
+      ID = Intrinsic::riscv_fsl;
+      break;
+    case RISCV::BI__builtin_riscv_fsr_32:
+    case RISCV::BI__builtin_riscv_fsr_64:
+      ID = Intrinsic::riscv_fsr;
+      break;
     }
 
     IntrinsicTypes = {ResultType};
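Both builtin widths funnel into the same overloaded intrinsics, llvm.riscv.fsl and llvm.riscv.fsr; IntrinsicTypes = {ResultType} is what instantiates them at i32 or i64 from the builtin's return type. A small, hypothetical wrapper (not part of the patch, relying only on the standard __riscv_xlen predefine) shows how source code might pick the XLEN-wide variant:

/* Hypothetical convenience wrapper: select the funnel-shift builtin whose
 * width matches XLEN.  __riscv_xlen is predefined by RISC-V compilers. */
static inline long fsr_xlen(long rs1, long rs2, long rs3) {
#if __riscv_xlen == 64
  return __builtin_riscv_fsr_64(rs1, rs2, rs3);
#else
  return __builtin_riscv_fsr_32(rs1, rs2, rs3);
#endif
}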
New test file (Clang CodeGen, riscv32 Zbt builtins):

@@ -0,0 +1,54 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv32 -target-feature +experimental-zbt -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV32ZBT

// RV32ZBT-LABEL: @fsl(
// RV32ZBT-NEXT:  entry:
// RV32ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT:    store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP3:%.*]] = call i32 @llvm.riscv.fsl.i32(i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
// RV32ZBT-NEXT:    ret i32 [[TMP3]]
//
int fsl(int rs1, int rs2, int rs3) {
  return __builtin_riscv_fsl_32(rs1, rs2, rs3);
}

// RV32ZBT-LABEL: @fsr(
// RV32ZBT-NEXT:  entry:
// RV32ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT:    store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP3:%.*]] = call i32 @llvm.riscv.fsr.i32(i32 [[TMP0]], i32 [[TMP1]], i32 [[TMP2]])
// RV32ZBT-NEXT:    ret i32 [[TMP3]]
//
int fsr(int rs1, int rs2, int rs3) {
  return __builtin_riscv_fsr_32(rs1, rs2, rs3);
}

// RV32ZBT-LABEL: @fsri(
// RV32ZBT-NEXT:  entry:
// RV32ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV32ZBT-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV32ZBT-NEXT:    [[TMP2:%.*]] = call i32 @llvm.riscv.fsr.i32(i32 [[TMP0]], i32 [[TMP1]], i32 15)
// RV32ZBT-NEXT:    ret i32 [[TMP2]]
//
int fsri(int rs1, int rs2) {
  return __builtin_riscv_fsr_32(rs1, rs2, 15);
}
New test file (Clang CodeGen, riscv64 Zbt builtins):

@@ -0,0 +1,116 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-zbt -emit-llvm %s -o - \
// RUN:   | FileCheck %s -check-prefix=RV64ZBT

// RV64ZBT-LABEL: @fsl(
// RV64ZBT-NEXT:  entry:
// RV64ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT:    store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// RV64ZBT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// RV64ZBT-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP2]] to i64
// RV64ZBT-NEXT:    [[TMP3:%.*]] = call i64 @llvm.riscv.fsl.i64(i64 [[CONV]], i64 [[CONV1]], i64 [[CONV2]])
// RV64ZBT-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP3]] to i32
// RV64ZBT-NEXT:    ret i32 [[CONV3]]
//
int fsl(int rs1, int rs2, int rs3) {
  return __builtin_riscv_fsl_32(rs1, rs2, rs3);
}

// RV64ZBT-LABEL: @fsr(
// RV64ZBT-NEXT:  entry:
// RV64ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    [[RS3_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT:    store i32 [[RS3:%.*]], i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// RV64ZBT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// RV64ZBT-NEXT:    [[TMP2:%.*]] = load i32, i32* [[RS3_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV2:%.*]] = sext i32 [[TMP2]] to i64
// RV64ZBT-NEXT:    [[TMP3:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[CONV]], i64 [[CONV1]], i64 [[CONV2]])
// RV64ZBT-NEXT:    [[CONV3:%.*]] = trunc i64 [[TMP3]] to i32
// RV64ZBT-NEXT:    ret i32 [[CONV3]]
//
int fsr(int rs1, int rs2, int rs3) {
  return __builtin_riscv_fsr_32(rs1, rs2, rs3);
}

// RV64ZBT-LABEL: @fsri(
// RV64ZBT-NEXT:  entry:
// RV64ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i32, align 4
// RV64ZBT-NEXT:    store i32 [[RS1:%.*]], i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT:    store i32 [[RS2:%.*]], i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RS1_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV:%.*]] = sext i32 [[TMP0]] to i64
// RV64ZBT-NEXT:    [[TMP1:%.*]] = load i32, i32* [[RS2_ADDR]], align 4
// RV64ZBT-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
// RV64ZBT-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[CONV]], i64 [[CONV1]], i64 15)
// RV64ZBT-NEXT:    [[CONV2:%.*]] = trunc i64 [[TMP2]] to i32
// RV64ZBT-NEXT:    ret i32 [[CONV2]]
//
int fsri(int rs1, int rs2) {
  return __builtin_riscv_fsr_32(rs1, rs2, 15);
}

// RV64ZBT-LABEL: @fslw(
// RV64ZBT-NEXT:  entry:
// RV64ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    [[RS3_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT:    store i64 [[RS3:%.*]], i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP2:%.*]] = load i64, i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP3:%.*]] = call i64 @llvm.riscv.fsl.i64(i64 [[TMP0]], i64 [[TMP1]], i64 [[TMP2]])
// RV64ZBT-NEXT:    ret i64 [[TMP3]]
//
long fslw(long rs1, long rs2, long rs3) {
  return __builtin_riscv_fsl_64(rs1, rs2, rs3);
}

// RV64ZBT-LABEL: @fsrw(
// RV64ZBT-NEXT:  entry:
// RV64ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    [[RS3_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT:    store i64 [[RS3:%.*]], i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP2:%.*]] = load i64, i64* [[RS3_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP3:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[TMP0]], i64 [[TMP1]], i64 [[TMP2]])
// RV64ZBT-NEXT:    ret i64 [[TMP3]]
//
long fsrw(long rs1, long rs2, long rs3) {
  return __builtin_riscv_fsr_64(rs1, rs2, rs3);
}

// RV64ZBT-LABEL: @fsriw(
// RV64ZBT-NEXT:  entry:
// RV64ZBT-NEXT:    [[RS1_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    [[RS2_ADDR:%.*]] = alloca i64, align 8
// RV64ZBT-NEXT:    store i64 [[RS1:%.*]], i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT:    store i64 [[RS2:%.*]], i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP0:%.*]] = load i64, i64* [[RS1_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP1:%.*]] = load i64, i64* [[RS2_ADDR]], align 8
// RV64ZBT-NEXT:    [[TMP2:%.*]] = call i64 @llvm.riscv.fsr.i64(i64 [[TMP0]], i64 [[TMP1]], i64 15)
// RV64ZBT-NEXT:    ret i64 [[TMP2]]
//
long fsriw(long rs1, long rs2) {
  return __builtin_riscv_fsr_64(rs1, rs2, 15);
}
llvm/include/llvm/IR/IntrinsicsRISCV.td

@@ -80,6 +80,10 @@ let TargetPrefix = "riscv" in {
       : Intrinsic<[llvm_any_ty],
                   [LLVMMatchType<0>, LLVMMatchType<0>],
                   [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
+  class BitManipGPRGPRGRIntrinsics
+      : Intrinsic<[llvm_any_ty],
+                  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+                  [IntrNoMem, IntrSpeculatable, IntrWillReturn]>;
 
   // Zbb
   def int_riscv_orc_b : BitManipGPRIntrinsics;

@@ -115,6 +119,10 @@ let TargetPrefix = "riscv" in {
   def int_riscv_crc32c_h : BitManipGPRIntrinsics;
   def int_riscv_crc32c_w : BitManipGPRIntrinsics;
   def int_riscv_crc32c_d : BitManipGPRIntrinsics;
+
+  // Zbt
+  def int_riscv_fsl : BitManipGPRGPRGRIntrinsics;
+  def int_riscv_fsr : BitManipGPRGPRGRIntrinsics;
 } // TargetPrefix = "riscv"
 
 //===----------------------------------------------------------------------===//
llvm/lib/Target/RISCV/RISCVISelLowering.cpp

@@ -4258,6 +4258,12 @@ SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
   case Intrinsic::riscv_bfp:
     return DAG.getNode(RISCVISD::BFP, DL, XLenVT, Op.getOperand(1),
                        Op.getOperand(2));
+  case Intrinsic::riscv_fsl:
+    return DAG.getNode(RISCVISD::FSL, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2), Op.getOperand(3));
+  case Intrinsic::riscv_fsr:
+    return DAG.getNode(RISCVISD::FSR, DL, XLenVT, Op.getOperand(1),
+                       Op.getOperand(2), Op.getOperand(3));
   case Intrinsic::riscv_vmv_x_s:
     assert(Op.getValueType() == XLenVT && "Unexpected VT!");
     return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),

@@ -5845,6 +5851,10 @@ static RISCVISD::NodeType getRISCVWOpcodeByIntr(unsigned IntNo) {
     return RISCVISD::BDECOMPRESSW;
   case Intrinsic::riscv_bfp:
     return RISCVISD::BFPW;
+  case Intrinsic::riscv_fsl:
+    return RISCVISD::FSLW;
+  case Intrinsic::riscv_fsr:
+    return RISCVISD::FSRW;
   }
 }
 

@@ -6338,6 +6348,21 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
     Results.push_back(customLegalizeToWOpByIntr(N, DAG, IntNo));
     break;
   }
+  case Intrinsic::riscv_fsl:
+  case Intrinsic::riscv_fsr: {
+    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
+           "Unexpected custom legalisation");
+    SDValue NewOp1 =
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
+    SDValue NewOp2 =
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
+    SDValue NewOp3 =
+        DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(3));
+    unsigned Opc = getRISCVWOpcodeByIntr(IntNo);
+    SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2, NewOp3);
+    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
+    break;
+  }
   case Intrinsic::riscv_orc_b: {
     // Lower to the GORCI encoding for orc.b with the operand extended.
     SDValue NewOp =
New test file (llc, RV32 Zbt intrinsics):

@@ -0,0 +1,43 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV32ZBT

declare i32 @llvm.riscv.fsl.i32(i32, i32, i32)

define i32 @fsl_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32ZBT-LABEL: fsl_i32:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    fsl a0, a0, a1, a2
; RV32ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %1
}

declare i32 @llvm.riscv.fsr.i32(i32, i32, i32)

define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV32ZBT-LABEL: fsr_i32:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    fsr a0, a0, a1, a2
; RV32ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %1
}

define i32 @fsli_i32(i32 %a, i32 %b) nounwind {
; RV32ZBT-LABEL: fsli_i32:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    fsri a0, a1, a0, 27
; RV32ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 5)
  ret i32 %1
}

define i32 @fsri_i32(i32 %a, i32 %b) nounwind {
; RV32ZBT-LABEL: fsri_i32:
; RV32ZBT:       # %bb.0:
; RV32ZBT-NEXT:    fsri a0, a0, a1, 15
; RV32ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 15)
  ret i32 %1
}
New test file (llc, RV64 Zbt intrinsics):

@@ -0,0 +1,83 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zbt -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefix=RV64ZBT

declare i32 @llvm.riscv.fsl.i32(i32, i32, i32)

define i32 @fsl_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV64ZBT-LABEL: fsl_i32:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fslw a0, a0, a1, a2
; RV64ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %1
}

declare i32 @llvm.riscv.fsr.i32(i32, i32, i32)

define i32 @fsr_i32(i32 %a, i32 %b, i32 %c) nounwind {
; RV64ZBT-LABEL: fsr_i32:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsrw a0, a0, a1, a2
; RV64ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 %c)
  ret i32 %1
}

define i32 @fsli_i32(i32 %a, i32 %b) nounwind {
; RV64ZBT-LABEL: fsli_i32:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsriw a0, a1, a0, 27
; RV64ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsl.i32(i32 %a, i32 %b, i32 5)
  ret i32 %1
}

define i32 @fsri_i32(i32 %a, i32 %b) nounwind {
; RV64ZBT-LABEL: fsri_i32:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsriw a0, a0, a1, 15
; RV64ZBT-NEXT:    ret
  %1 = call i32 @llvm.riscv.fsr.i32(i32 %a, i32 %b, i32 15)
  ret i32 %1
}

declare i64 @llvm.riscv.fsl.i64(i64, i64, i64)

define i64 @fsl_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64ZBT-LABEL: fsl_i64:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsl a0, a0, a1, a2
; RV64ZBT-NEXT:    ret
  %1 = call i64 @llvm.riscv.fsl.i64(i64 %a, i64 %b, i64 %c)
  ret i64 %1
}

declare i64 @llvm.riscv.fsr.i64(i64, i64, i64)

define i64 @fsr_i64(i64 %a, i64 %b, i64 %c) nounwind {
; RV64ZBT-LABEL: fsr_i64:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsr a0, a0, a1, a2
; RV64ZBT-NEXT:    ret
  %1 = call i64 @llvm.riscv.fsr.i64(i64 %a, i64 %b, i64 %c)
  ret i64 %1
}

define i64 @fsli_i64(i64 %a, i64 %b) nounwind {
; RV64ZBT-LABEL: fsli_i64:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsri a0, a1, a0, 49
; RV64ZBT-NEXT:    ret
  %1 = call i64 @llvm.riscv.fsl.i64(i64 %a, i64 %b, i64 15)
  ret i64 %1
}

define i64 @fsri_i64(i64 %a, i64 %b) nounwind {
; RV64ZBT-LABEL: fsri_i64:
; RV64ZBT:       # %bb.0:
; RV64ZBT-NEXT:    fsri a0, a0, a1, 5
; RV64ZBT-NEXT:    ret
  %1 = call i64 @llvm.riscv.fsr.i64(i64 %a, i64 %b, i64 5)
  ret i64 %1
}
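As the fsli/fsri checks in both llc tests show, a constant shift amount lets instruction selection pick the immediate forms (fsri on RV32, fsri/fsriw on RV64). A hypothetical C-level example that should hit that path (again using equal data inputs so the operation is just a rotate; illustrative only, not part of the patch):

/* Hypothetical, not part of the patch: constant amount plus equal inputs,
 * which the backend can select as the immediate fsri/fsriw form. */
static inline int rotate_by_15(int x) {
  return __builtin_riscv_fsr_32(x, x, 15);
}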