[RISCV] Add the passthru operand for some RVV nomask unary and nullary intrinsics.
The goal is to support tail and mask policy in RVV builtins. We focus on the IR part first. If the passthru operand is undef, we use tail agnostic; otherwise we use tail undisturbed. My plan is to handle more complex operations in follow-up patches.

Reviewers: frasercrmck

Differential Revision: https://reviews.llvm.org/D118253
parent 86bebe1a90
commit e8973dd389
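For illustration, a minimal IR sketch of the new operand (not part of this diff; the intrinsic name and types are copied from the vfclass test below, while the wrapper functions and the non-undef call are hypothetical):

; Passthru operand is undef: the operation is lowered with tail agnostic.
declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32>, <vscale x 1 x float>, i64)

define <vscale x 1 x i32> @sketch_tail_agnostic(<vscale x 1 x float> %op1, i64 %vl) {
entry:
  %res = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> %op1, i64 %vl)
  ret <vscale x 1 x i32> %res
}

; Passthru operand is a real vector: per the description above, tail elements
; are expected to come from %passthru (tail undisturbed).
define <vscale x 1 x i32> @sketch_tail_undisturbed(<vscale x 1 x i32> %passthru, <vscale x 1 x float> %op1, i64 %vl) {
entry:
  %res = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> %passthru, <vscale x 1 x float> %op1, i64 %vl)
  ret <vscale x 1 x i32> %res
}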
@@ -1616,6 +1616,7 @@ defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
                                         [["w", "wv"]]>;
 
 // 12.3. Vector Integer Extension
+let HasNoMaskPassThru = true in {
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
   def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
@@ -1628,6 +1629,7 @@ let Log2LMUL = [-3, -2, -1, 0] in {
   def vsext_vf8 : RVVIntExt<"vsext", "o", "ov", "c">;
   def vzext_vf8 : RVVIntExt<"vzext", "Uo", "UoUv", "c">;
 }
+}
 
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
 let HasMask = false, HasPolicy = false in {
@@ -1833,6 +1835,7 @@ defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
 
 // 14.8. Vector Floating-Point Square-Root Instruction
+let HasNoMaskPassThru = true in {
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
 
 // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -1842,7 +1845,6 @@ def vfrsqrt7 : RVVFloatingUnaryVVBuiltin;
 def vfrec7 : RVVFloatingUnaryVVBuiltin;
 
 // 14.11. Vector Floating-Point MIN/MAX Instructions
-let HasNoMaskPassThru = true in {
 defm vfmin : RVVFloatingBinBuiltinSet;
 defm vfmax : RVVFloatingBinBuiltinSet;
 
@@ -1865,7 +1867,7 @@ defm vmfge : RVVFloatingMaskOutBuiltinSet;
 }
 
 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v", HasPolicy = false in
+let Name = "vfclass_v", HasNoMaskPassThru = true in
   def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
 
 // 14.15. Vector Floating-Point Merge Instruction
@@ -1887,6 +1889,7 @@ let HasMask = false, HasNoMaskedOverloaded = false, HasPolicy = false in
                                         [["f", "v", "ve"]]>;
 
 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
+let HasNoMaskPassThru = true in {
 def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
 def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
 def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
@@ -1916,6 +1919,7 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
   def vfncvt_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_f">;
   def vfncvt_rod_f_f_w : RVVConvBuiltin<"v", "vw", "xf", "vfncvt_rod_f">;
 }
+}
 
 // 15. Vector Reduction Operations
 // 15.1. Vector Single-Width Integer Reduction Instructions
@@ -1981,7 +1985,7 @@ def vmsif : RVVMaskUnaryBuiltin;
 // 16.6. vmsof.m set-only-first mask bit
 def vmsof : RVVMaskUnaryBuiltin;
 
-let HasNoMaskedOverloaded = false in {
+let HasNoMaskPassThru = true, HasNoMaskedOverloaded = false in {
   // 16.8. Vector Iota Instruction
   defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
 
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
@ -16,7 +16,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
|
||||
|
@ -25,7 +25,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
|
||||
|
@ -34,7 +34,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
|
||||
|
@ -43,7 +43,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
|
||||
|
@ -52,7 +52,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -61,7 +61,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -70,7 +70,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -79,7 +79,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
|
||||
|
@@ -88,7 +88,7 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@ -98,7 +98,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
|
||||
|
@ -108,7 +108,7 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
|
||||
|
@ -118,7 +118,7 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
|
||||
|
@ -128,7 +128,7 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
|
||||
|
@ -138,7 +138,7 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
|
||||
|
@ -148,7 +148,7 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
|
||||
|
@ -158,7 +158,7 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
|
||||
|
@ -168,7 +168,7 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
|
||||
|
|
|
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@ -16,7 +16,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -25,7 +25,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -34,7 +34,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -43,7 +43,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -52,7 +52,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -61,7 +61,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -70,7 +70,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -79,7 +79,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -88,7 +88,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -97,7 +97,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -106,7 +106,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -115,7 +115,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -124,7 +124,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -133,7 +133,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -142,7 +142,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -151,7 +151,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -160,7 +160,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -169,7 +169,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -178,7 +178,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@@ -187,7 +187,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
 
 // CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
@ -196,7 +196,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
|
||||
|
@ -205,7 +205,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
|
||||
|
@ -214,7 +214,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
|
||||
|
@ -223,7 +223,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
|
||||
|
@ -232,7 +232,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
|
||||
|
@ -241,7 +241,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
|
||||
|
@ -250,7 +250,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
|
||||
|
@ -259,7 +259,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
|
||||
|
@ -268,7 +268,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
|
||||
|
@ -277,7 +277,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -286,7 +286,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -295,7 +295,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -304,7 +304,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -313,7 +313,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -322,7 +322,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -331,7 +331,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -340,7 +340,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -349,7 +349,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -358,7 +358,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -367,7 +367,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -376,7 +376,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -385,7 +385,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -394,7 +394,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -403,7 +403,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -412,7 +412,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -421,7 +421,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
|
||||
|
@ -430,7 +430,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
|
||||
|
@ -439,7 +439,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
|
||||
|
@ -448,7 +448,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
|
||||
|
@ -457,7 +457,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
|
||||
|
@ -466,7 +466,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
|
||||
|
@ -475,7 +475,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
|
||||
|
@ -484,7 +484,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
|
||||
|
|
|
@@ -7,7 +7,7 @@
 
 // CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
@ -16,7 +16,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -25,7 +25,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -34,7 +34,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -43,7 +43,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -52,7 +52,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -61,7 +61,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -70,7 +70,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -79,7 +79,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -88,7 +88,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -97,7 +97,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -106,7 +106,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -115,7 +115,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -124,7 +124,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -133,7 +133,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -142,7 +142,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -151,7 +151,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -160,7 +160,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -169,7 +169,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -178,7 +178,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -187,7 +187,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -196,7 +196,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -205,7 +205,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -214,7 +214,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -223,7 +223,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -232,7 +232,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -241,7 +241,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -250,7 +250,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -259,7 +259,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -268,7 +268,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -277,7 +277,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -286,7 +286,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -295,7 +295,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -304,7 +304,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -313,7 +313,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -322,7 +322,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -331,7 +331,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
|
||||
|
@ -340,7 +340,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
|
||||
|
@ -349,7 +349,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
|
||||
|
@ -358,7 +358,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
|
||||
|
@ -367,7 +367,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
|
||||
|
@ -376,7 +376,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
|
||||
|
@ -385,7 +385,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
|
||||
|
@ -394,7 +394,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
|
||||
|
@ -403,7 +403,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -412,7 +412,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -421,7 +421,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -430,7 +430,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -439,7 +439,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -448,7 +448,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -457,7 +457,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -466,7 +466,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
|
|
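For orientation, here is a minimal C-level sketch of the kind of test function these narrowing-conversion checks are generated from; the builtin name, headers, and wrapper name follow the usual clang RVV test conventions and are assumptions here, not part of the diff. The nomask builtin is called without a merge vector, which is why the IR calls above now show an undef passthru operand.

#include <stddef.h>
#include <riscv_vector.h>

// Nomask narrowing convert (assumed builtin name): no destination/merge
// vector is supplied at the C level, so the generated vfncvt intrinsic
// call carries an undef passthru operand as its first argument.
vint16mf4_t narrow_f32_to_i16(vfloat32mf2_t src, size_t vl) {
  return vfncvt_x_f_w_i16mf4(src, vl);
}
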
@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {

(The remaining hunks apply the same undef-passthru change to the vfrec7 checks for:)

vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl)
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl)
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl)
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl)
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl)
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl)
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl)
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl)

@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {

(The remaining hunks apply the same undef-passthru change to the vfrsqrt7 checks for:)

vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl)
vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl)
vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl)
vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl)
vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl)
vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl)
vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl)
vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl)

@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {

(The remaining hunks apply the same undef-passthru change to the vfsqrt checks for:)

vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl)
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl)
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl)
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl)
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl)
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl)
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl)
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl)

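Similarly, a hedged sketch of the unary case; the builtin name is again assumed from the test naming convention. Only the source operand and vl are passed, matching the undef passthru seen in the vfsqrt checks above.

#include <stddef.h>
#include <riscv_vector.h>

// Nomask unary op (assumed builtin name): the square-root builtin takes
// only the source vector and vl, so the vfsqrt intrinsic call above is
// emitted with an undef passthru operand of the result type.
vfloat32mf2_t sqrt_f32mf2(vfloat32mf2_t op1, size_t vl) {
  return vfsqrt_v_f32mf2(op1, vl);
}
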
@@ -7,7 +7,7 @@

// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {

(The remaining hunks apply the same undef-passthru change to the vfwcvt checks for:)

vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl)
vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl)
vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl)
vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl)
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl)
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl)
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl)
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl)
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl)
vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl)
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl)
vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl)
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl)
vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl)

// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -151,7 +151,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -160,7 +160,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -169,7 +169,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -178,7 +178,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -187,7 +187,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -196,7 +196,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -205,7 +205,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -214,7 +214,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -223,7 +223,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -232,7 +232,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -241,7 +241,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
|
||||
|
@ -250,7 +250,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
|
||||
|
@ -259,7 +259,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
|
||||
|
@ -268,7 +268,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
|
||||
|
@ -277,7 +277,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
|
||||
|
@ -286,7 +286,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
|
||||
|
@ -295,7 +295,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
|
||||
|
@ -304,7 +304,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
|
||||
|
@ -313,7 +313,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -322,7 +322,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -331,7 +331,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -340,7 +340,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
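For context, a minimal sketch of the pattern these vfwcvt checks exercise, assuming the nomask builtins from <riscv_vector.h> used by the tests above; the builtin itself takes no extra argument, and the generated IR call now carries an undef vector of the result type as its leading (passthru) operand:

// Illustrative sketch only; mirrors test_vfwcvt_f_f_v_f64m2 above.
#include <riscv_vector.h>

vfloat64m2_t widen_f32_to_f64(vfloat32m1_t src, size_t vl) {
  // Expected to lower to @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64 with a
  // leading <vscale x 2 x double> undef passthru operand, per the checks above.
  return vfwcvt_f_f_v_f64m2(src, vl);
}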
@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
@ -15,7 +15,7 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
|
||||
|
@ -24,7 +24,7 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
|
||||
|
@ -33,7 +33,7 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
|
||||
|
@ -42,7 +42,7 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
|
||||
|
@ -51,7 +51,7 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
|
||||
|
@ -60,7 +60,7 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
|
||||
|
@ -69,7 +69,7 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
|
||||
|
@ -78,7 +78,7 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
|
||||
|
@ -87,7 +87,7 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
|
||||
|
@ -96,7 +96,7 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
|
||||
|
@ -105,7 +105,7 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
|
||||
|
@ -114,7 +114,7 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
|
||||
|
@ -186,7 +186,7 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
|
||||
|
@ -195,7 +195,7 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
|
||||
|
@ -204,7 +204,7 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
|
||||
|
@ -213,7 +213,7 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
|
||||
|
@ -222,7 +222,7 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
|
||||
|
@ -231,7 +231,7 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
|
||||
|
@ -240,7 +240,7 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
|
||||
|
@ -249,7 +249,7 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
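A similarly hedged sketch for the vsext updates, assuming the nomask vsext builtins exercised above; the C-level signature is unchanged, only the emitted IR call gains the undef passthru:

// Illustrative sketch only; mirrors test_vsext_vf2_i16m2 above.
#include <riscv_vector.h>

vint16m2_t sign_extend_i8_to_i16(vint8m1_t op1, size_t vl) {
  // Expected to lower to @llvm.riscv.vsext.nxv8i16.nxv8i8.i64 with a leading
  // <vscale x 8 x i16> undef passthru operand.
  return vsext_vf2_i16m2(op1, vl);
}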
@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
@ -15,7 +15,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -24,7 +24,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -33,7 +33,7 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
|
||||
|
@ -42,7 +42,7 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
|
||||
|
@ -51,7 +51,7 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
|
||||
|
@ -60,7 +60,7 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
|
||||
|
@ -69,7 +69,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -78,7 +78,7 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -87,7 +87,7 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
|
||||
|
@ -96,7 +96,7 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
|
||||
|
@ -105,7 +105,7 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
|
||||
|
@ -114,7 +114,7 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
|
||||
|
@ -186,7 +186,7 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
|
||||
|
@ -195,7 +195,7 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
|
||||
|
@ -204,7 +204,7 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
|
||||
|
@ -213,7 +213,7 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
|
||||
|
@ -222,7 +222,7 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
|
||||
|
@ -231,7 +231,7 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
|
||||
|
@ -240,7 +240,7 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
|
||||
|
@ -249,7 +249,7 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
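One more sketch before the vfclass checks, assuming the nomask vzext builtins shown above; vzext follows the same pattern as vsext, widening with an undef passthru of the destination type:

// Illustrative sketch only; mirrors test_vzext_vf2_u32m1 above.
#include <riscv_vector.h>

vuint32m1_t zero_extend_u16_to_u32(vuint16mf2_t op1, size_t vl) {
  // Expected to lower to @llvm.riscv.vzext.nxv2i32.nxv2i16.i64 with a leading
  // <vscale x 2 x i32> undef passthru operand.
  return vzext_vf2_u32m1(op1, vl);
}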
@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfclass_v_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
@ -17,7 +17,7 @@ vuint32mf2_t test_vfclass_v_u32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
@ -26,7 +26,7 @@ vuint32m1_t test_vfclass_v_u32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
@ -35,7 +35,7 @@ vuint32m2_t test_vfclass_v_u32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
@ -44,7 +44,7 @@ vuint32m4_t test_vfclass_v_u32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
@ -53,7 +53,7 @@ vuint32m8_t test_vfclass_v_u32m8(vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -62,7 +62,7 @@ vuint64m1_t test_vfclass_v_u64m1(vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -71,7 +71,7 @@ vuint64m2_t test_vfclass_v_u64m2(vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -80,7 +80,7 @@ vuint64m4_t test_vfclass_v_u64m4(vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
|
||||
|
@@ -89,7 +89,7 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfclass_v_u32mf2_m(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1f32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
@ -99,7 +99,7 @@ vuint32mf2_t test_vfclass_v_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2f32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
|
||||
|
@ -109,7 +109,7 @@ vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4f32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
|
||||
|
@ -119,7 +119,7 @@ vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8f32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
|
||||
|
@ -129,7 +129,7 @@ vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u32m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16f32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
|
||||
|
@ -139,7 +139,7 @@ vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1f64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
|
||||
|
@ -149,7 +149,7 @@ vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2f64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
|
||||
|
@ -159,7 +159,7 @@ vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4f64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
|
||||
|
@ -169,7 +169,7 @@ vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u64m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8f64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
|
||||
|
@ -179,7 +179,7 @@ vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vuint16mf4_t test_vfclass_v_u16mf4 (vfloat16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vuint16mf2_t test_vfclass_v_u16mf2 (vfloat16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vuint16m1_t test_vfclass_v_u16m1 (vfloat16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vuint16m2_t test_vfclass_v_u16m2 (vfloat16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfclass_v_u16m4 (vfloat16m4_t op1, size_t vl) {
|
||||
|
@ -224,7 +224,7 @@ vuint16m4_t test_vfclass_v_u16m4 (vfloat16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl) {
|
||||
|
@ -233,7 +233,7 @@ vuint16m8_t test_vfclass_v_u16m8 (vfloat16m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1f16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfclass_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vfloat16mf4_t op1, size_t vl) {
|
||||
|
@ -242,7 +242,7 @@ vuint16mf4_t test_vfclass_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vf
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16mf2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2f16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfclass_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vfloat16mf2_t op1, size_t vl) {
|
||||
|
@ -251,7 +251,7 @@ vuint16mf2_t test_vfclass_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vf
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m1_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4f16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfclass_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) {
|
||||
|
@ -260,7 +260,7 @@ vuint16m1_t test_vfclass_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vfloa
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m2_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8f16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfclass_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) {
|
||||
|
@ -269,7 +269,7 @@ vuint16m2_t test_vfclass_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vfloat
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m4_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16f16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfclass_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) {
|
||||
|
@ -278,7 +278,7 @@ vuint16m4_t test_vfclass_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vfloat
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfclass_v_u16m8_m(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32f16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vfclass_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) {
|
||||
|
|
|
@@ -8,7 +8,7 @@
 // CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@@ -17,7 +17,7 @@ vint32mf2_t test_vfcvt_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
 // CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
@ -26,7 +26,7 @@ vint32mf2_t test_vfcvt_rtz_x_f_v_i32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -35,7 +35,7 @@ vint32m1_t test_vfcvt_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -44,7 +44,7 @@ vint32m1_t test_vfcvt_rtz_x_f_v_i32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -53,7 +53,7 @@ vint32m2_t test_vfcvt_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -62,7 +62,7 @@ vint32m2_t test_vfcvt_rtz_x_f_v_i32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -71,7 +71,7 @@ vint32m4_t test_vfcvt_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -80,7 +80,7 @@ vint32m4_t test_vfcvt_rtz_x_f_v_i32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -89,7 +89,7 @@ vint32m8_t test_vfcvt_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -98,7 +98,7 @@ vint32m8_t test_vfcvt_rtz_x_f_v_i32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -107,7 +107,7 @@ vuint32mf2_t test_vfcvt_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32.i64(<vscale x 1 x i32> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -116,7 +116,7 @@ vuint32mf2_t test_vfcvt_rtz_xu_f_v_u32mf2(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -125,7 +125,7 @@ vuint32m1_t test_vfcvt_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32.i64(<vscale x 2 x i32> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -134,7 +134,7 @@ vuint32m1_t test_vfcvt_rtz_xu_f_v_u32m1(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -143,7 +143,7 @@ vuint32m2_t test_vfcvt_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32.i64(<vscale x 4 x i32> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -152,7 +152,7 @@ vuint32m2_t test_vfcvt_rtz_xu_f_v_u32m2(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -161,7 +161,7 @@ vuint32m4_t test_vfcvt_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32.i64(<vscale x 8 x i32> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -170,7 +170,7 @@ vuint32m4_t test_vfcvt_rtz_xu_f_v_u32m4(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -179,7 +179,7 @@ vuint32m8_t test_vfcvt_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32.i64(<vscale x 16 x i32> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vuint32m8_t test_vfcvt_rtz_xu_f_v_u32m8(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vfloat32mf2_t test_vfcvt_f_x_v_f32mf2(vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vfloat32m1_t test_vfcvt_f_x_v_f32m1(vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vfloat32m2_t test_vfcvt_f_x_v_f32m2(vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
|
||||
|
@ -224,7 +224,7 @@ vfloat32m4_t test_vfcvt_f_x_v_f32m4(vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
|
||||
|
@ -233,7 +233,7 @@ vfloat32m8_t test_vfcvt_f_x_v_f32m8(vint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32.i64(<vscale x 1 x float> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
|
||||
|
@ -242,7 +242,7 @@ vfloat32mf2_t test_vfcvt_f_xu_v_f32mf2(vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32.i64(<vscale x 2 x float> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
|
||||
|
@ -251,7 +251,7 @@ vfloat32m1_t test_vfcvt_f_xu_v_f32m1(vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32.i64(<vscale x 4 x float> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
|
||||
|
@ -260,7 +260,7 @@ vfloat32m2_t test_vfcvt_f_xu_v_f32m2(vuint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32.i64(<vscale x 8 x float> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
|
||||
|
@ -269,7 +269,7 @@ vfloat32m4_t test_vfcvt_f_xu_v_f32m4(vuint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32.i64(<vscale x 16 x float> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
|
||||
|
@ -278,7 +278,7 @@ vfloat32m8_t test_vfcvt_f_xu_v_f32m8(vuint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -287,7 +287,7 @@ vint64m1_t test_vfcvt_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -296,7 +296,7 @@ vint64m1_t test_vfcvt_rtz_x_f_v_i64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -305,7 +305,7 @@ vint64m2_t test_vfcvt_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -314,7 +314,7 @@ vint64m2_t test_vfcvt_rtz_x_f_v_i64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -323,7 +323,7 @@ vint64m4_t test_vfcvt_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -332,7 +332,7 @@ vint64m4_t test_vfcvt_rtz_x_f_v_i64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -341,7 +341,7 @@ vint64m8_t test_vfcvt_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -350,7 +350,7 @@ vint64m8_t test_vfcvt_rtz_x_f_v_i64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -359,7 +359,7 @@ vuint64m1_t test_vfcvt_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64.i64(<vscale x 1 x i64> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -368,7 +368,7 @@ vuint64m1_t test_vfcvt_rtz_xu_f_v_u64m1(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -377,7 +377,7 @@ vuint64m2_t test_vfcvt_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64.i64(<vscale x 2 x i64> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -386,7 +386,7 @@ vuint64m2_t test_vfcvt_rtz_xu_f_v_u64m2(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -395,7 +395,7 @@ vuint64m4_t test_vfcvt_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64.i64(<vscale x 4 x i64> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -404,7 +404,7 @@ vuint64m4_t test_vfcvt_rtz_xu_f_v_u64m4(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -413,7 +413,7 @@ vuint64m8_t test_vfcvt_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64.i64(<vscale x 8 x i64> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -422,7 +422,7 @@ vuint64m8_t test_vfcvt_rtz_xu_f_v_u64m8(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
|
||||
|
@ -431,7 +431,7 @@ vfloat64m1_t test_vfcvt_f_x_v_f64m1(vint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
|
||||
|
@ -440,7 +440,7 @@ vfloat64m2_t test_vfcvt_f_x_v_f64m2(vint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
|
||||
|
@ -449,7 +449,7 @@ vfloat64m4_t test_vfcvt_f_x_v_f64m4(vint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
|
||||
|
@ -458,7 +458,7 @@ vfloat64m8_t test_vfcvt_f_x_v_f64m8(vint64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64.i64(<vscale x 1 x double> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
|
||||
|
@ -467,7 +467,7 @@ vfloat64m1_t test_vfcvt_f_xu_v_f64m1(vuint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64.i64(<vscale x 2 x double> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
|
||||
|
@ -476,7 +476,7 @@ vfloat64m2_t test_vfcvt_f_xu_v_f64m2(vuint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64.i64(<vscale x 4 x double> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
|
||||
|
@ -485,7 +485,7 @@ vfloat64m4_t test_vfcvt_f_xu_v_f64m4(vuint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64.i64(<vscale x 8 x double> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) {
|
||||
|
@ -1036,7 +1036,7 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1045,7 +1045,7 @@ vint16mf4_t test_vfcvt_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1054,7 +1054,7 @@ vint16mf4_t test_vfcvt_rtz_x_f_v_i16mf4 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1063,7 +1063,7 @@ vint16mf2_t test_vfcvt_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1072,7 +1072,7 @@ vint16mf2_t test_vfcvt_rtz_x_f_v_i16mf2 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1081,7 +1081,7 @@ vint16m1_t test_vfcvt_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1090,7 +1090,7 @@ vint16m1_t test_vfcvt_rtz_x_f_v_i16m1 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1099,7 +1099,7 @@ vint16m2_t test_vfcvt_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1108,7 +1108,7 @@ vint16m2_t test_vfcvt_rtz_x_f_v_i16m2 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1117,7 +1117,7 @@ vint16m4_t test_vfcvt_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1126,7 +1126,7 @@ vint16m4_t test_vfcvt_rtz_x_f_v_i16m4 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_x_f_v_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1135,7 +1135,7 @@ vint16m8_t test_vfcvt_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_x_f_v_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1144,7 +1144,7 @@ vint16m8_t test_vfcvt_rtz_x_f_v_i16m8 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1153,7 +1153,7 @@ vuint16mf4_t test_vfcvt_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16.i64(<vscale x 1 x i16> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1162,7 +1162,7 @@ vuint16mf4_t test_vfcvt_rtz_xu_f_v_u16mf4 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1171,7 +1171,7 @@ vuint16mf2_t test_vfcvt_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16.i64(<vscale x 2 x i16> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1180,7 +1180,7 @@ vuint16mf2_t test_vfcvt_rtz_xu_f_v_u16mf2 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1189,7 +1189,7 @@ vuint16m1_t test_vfcvt_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16.i64(<vscale x 4 x i16> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1198,7 +1198,7 @@ vuint16m1_t test_vfcvt_rtz_xu_f_v_u16m1 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1207,7 +1207,7 @@ vuint16m2_t test_vfcvt_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16.i64(<vscale x 8 x i16> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1216,7 +1216,7 @@ vuint16m2_t test_vfcvt_rtz_xu_f_v_u16m2 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1225,7 +1225,7 @@ vuint16m4_t test_vfcvt_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16.i64(<vscale x 16 x i16> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1234,7 +1234,7 @@ vuint16m4_t test_vfcvt_rtz_xu_f_v_u16m4 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_xu_f_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1243,7 +1243,7 @@ vuint16m8_t test_vfcvt_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_rtz_xu_f_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16.i64(<vscale x 32 x i16> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1252,7 +1252,7 @@ vuint16m8_t test_vfcvt_rtz_xu_f_v_u16m8 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) {
|
||||
|
@ -1261,7 +1261,7 @@ vfloat16mf4_t test_vfcvt_f_x_v_f16mf4 (vint16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) {
|
||||
|
@ -1270,7 +1270,7 @@ vfloat16mf2_t test_vfcvt_f_x_v_f16mf2 (vint16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) {
|
||||
|
@ -1279,7 +1279,7 @@ vfloat16m1_t test_vfcvt_f_x_v_f16m1 (vint16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) {
|
||||
|
@ -1288,7 +1288,7 @@ vfloat16m2_t test_vfcvt_f_x_v_f16m2 (vint16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) {
|
||||
|
@ -1297,7 +1297,7 @@ vfloat16m4_t test_vfcvt_f_x_v_f16m4 (vint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_x_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) {
|
||||
|
@ -1306,7 +1306,7 @@ vfloat16m8_t test_vfcvt_f_x_v_f16m8 (vint16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16.i64(<vscale x 1 x half> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) {
|
||||
|
@ -1315,7 +1315,7 @@ vfloat16mf4_t test_vfcvt_f_xu_v_f16mf4 (vuint16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16.i64(<vscale x 2 x half> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) {
|
||||
|
@ -1324,7 +1324,7 @@ vfloat16mf2_t test_vfcvt_f_xu_v_f16mf2 (vuint16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16.i64(<vscale x 4 x half> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) {
|
||||
|
@ -1333,7 +1333,7 @@ vfloat16m1_t test_vfcvt_f_xu_v_f16m1 (vuint16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16.i64(<vscale x 8 x half> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) {
|
||||
|
@ -1342,7 +1342,7 @@ vfloat16m2_t test_vfcvt_f_xu_v_f16m2 (vuint16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16.i64(<vscale x 16 x half> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) {
|
||||
|
@ -1351,7 +1351,7 @@ vfloat16m4_t test_vfcvt_f_xu_v_f16m4 (vuint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfcvt_f_xu_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16.i64(<vscale x 32 x half> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfcvt_f_xu_v_f16m8 (vuint16m8_t src, size_t vl) {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -17,7 +17,7 @@ vint16mf4_t test_vfncvt_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -26,7 +26,7 @@ vint16mf4_t test_vfncvt_rtz_x_f_w_i16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -35,7 +35,7 @@ vint16mf2_t test_vfncvt_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -44,7 +44,7 @@ vint16mf2_t test_vfncvt_rtz_x_f_w_i16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -53,7 +53,7 @@ vint16m1_t test_vfncvt_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -62,7 +62,7 @@ vint16m1_t test_vfncvt_rtz_x_f_w_i16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -71,7 +71,7 @@ vint16m2_t test_vfncvt_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -80,7 +80,7 @@ vint16m2_t test_vfncvt_rtz_x_f_w_i16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -89,7 +89,7 @@ vint16m4_t test_vfncvt_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -98,7 +98,7 @@ vint16m4_t test_vfncvt_rtz_x_f_w_i16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -107,7 +107,7 @@ vuint16mf4_t test_vfncvt_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i16.nxv1f32.i64(<vscale x 1 x i16> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -116,7 +116,7 @@ vuint16mf4_t test_vfncvt_rtz_xu_f_w_u16mf4(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -125,7 +125,7 @@ vuint16mf2_t test_vfncvt_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i16.nxv2f32.i64(<vscale x 2 x i16> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -134,7 +134,7 @@ vuint16mf2_t test_vfncvt_rtz_xu_f_w_u16mf2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -143,7 +143,7 @@ vuint16m1_t test_vfncvt_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i16.nxv4f32.i64(<vscale x 4 x i16> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -152,7 +152,7 @@ vuint16m1_t test_vfncvt_rtz_xu_f_w_u16m1(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -161,7 +161,7 @@ vuint16m2_t test_vfncvt_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i16.nxv8f32.i64(<vscale x 8 x i16> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -170,7 +170,7 @@ vuint16m2_t test_vfncvt_rtz_xu_f_w_u16m2(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -179,7 +179,7 @@ vuint16m4_t test_vfncvt_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i16.nxv16f32.i64(<vscale x 16 x i16> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vuint16m4_t test_vfncvt_rtz_xu_f_w_u16m4(vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vint32mf2_t test_vfncvt_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vint32mf2_t test_vfncvt_rtz_x_f_w_i32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vint32m1_t test_vfncvt_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -224,7 +224,7 @@ vint32m1_t test_vfncvt_rtz_x_f_w_i32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -233,7 +233,7 @@ vint32m2_t test_vfncvt_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -242,7 +242,7 @@ vint32m2_t test_vfncvt_rtz_x_f_w_i32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -251,7 +251,7 @@ vint32m4_t test_vfncvt_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -260,7 +260,7 @@ vint32m4_t test_vfncvt_rtz_x_f_w_i32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -269,7 +269,7 @@ vuint32mf2_t test_vfncvt_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i32.nxv1f64.i64(<vscale x 1 x i32> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -278,7 +278,7 @@ vuint32mf2_t test_vfncvt_rtz_xu_f_w_u32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -287,7 +287,7 @@ vuint32m1_t test_vfncvt_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i32.nxv2f64.i64(<vscale x 2 x i32> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -296,7 +296,7 @@ vuint32m1_t test_vfncvt_rtz_xu_f_w_u32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -305,7 +305,7 @@ vuint32m2_t test_vfncvt_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i32.nxv4f64.i64(<vscale x 4 x i32> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -314,7 +314,7 @@ vuint32m2_t test_vfncvt_rtz_xu_f_w_u32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -323,7 +323,7 @@ vuint32m4_t test_vfncvt_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i32.nxv8f64.i64(<vscale x 8 x i32> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -332,7 +332,7 @@ vuint32m4_t test_vfncvt_rtz_xu_f_w_u32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.x.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
|
||||
|
@ -341,7 +341,7 @@ vfloat32mf2_t test_vfncvt_f_x_w_f32mf2(vint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.x.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
|
||||
|
@ -350,7 +350,7 @@ vfloat32m1_t test_vfncvt_f_x_w_f32m1(vint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.x.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
|
||||
|
@ -359,7 +359,7 @@ vfloat32m2_t test_vfncvt_f_x_w_f32m2(vint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.x.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
|
||||
|
@ -368,7 +368,7 @@ vfloat32m4_t test_vfncvt_f_x_w_f32m4(vint64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.xu.w.nxv1f32.nxv1i64.i64(<vscale x 1 x float> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
|
||||
|
@ -377,7 +377,7 @@ vfloat32mf2_t test_vfncvt_f_xu_w_f32mf2(vuint64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.xu.w.nxv2f32.nxv2i64.i64(<vscale x 2 x float> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
|
||||
|
@ -386,7 +386,7 @@ vfloat32m1_t test_vfncvt_f_xu_w_f32m1(vuint64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.xu.w.nxv4f32.nxv4i64.i64(<vscale x 4 x float> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
|
||||
|
@ -395,7 +395,7 @@ vfloat32m2_t test_vfncvt_f_xu_w_f32m2(vuint64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.xu.w.nxv8f32.nxv8i64.i64(<vscale x 8 x float> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
|
||||
|
@ -404,7 +404,7 @@ vfloat32m4_t test_vfncvt_f_xu_w_f32m4(vuint64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -413,7 +413,7 @@ vfloat32mf2_t test_vfncvt_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f32.nxv1f64.i64(<vscale x 1 x float> undef, <vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
||||
|
@ -422,7 +422,7 @@ vfloat32mf2_t test_vfncvt_rod_f_f_w_f32mf2(vfloat64m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -431,7 +431,7 @@ vfloat32m1_t test_vfncvt_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f32.nxv2f64.i64(<vscale x 2 x float> undef, <vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
||||
|
@ -440,7 +440,7 @@ vfloat32m1_t test_vfncvt_rod_f_f_w_f32m1(vfloat64m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -449,7 +449,7 @@ vfloat32m2_t test_vfncvt_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f32.nxv4f64.i64(<vscale x 4 x float> undef, <vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
||||
|
@ -458,7 +458,7 @@ vfloat32m2_t test_vfncvt_rod_f_f_w_f32m2(vfloat64m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -467,7 +467,7 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f32.nxv8f64.i64(<vscale x 8 x float> undef, <vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4(vfloat64m8_t src, size_t vl) {
|
||||
|
@ -1012,7 +1012,7 @@ vfloat32m4_t test_vfncvt_rod_f_f_w_f32m4_m(vbool8_t mask,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1021,7 +1021,7 @@ vint8mf8_t test_vfncvt_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1030,7 +1030,7 @@ vint8mf8_t test_vfncvt_rtz_x_f_w_i8mf8 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1039,7 +1039,7 @@ vint8mf4_t test_vfncvt_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1048,7 +1048,7 @@ vint8mf4_t test_vfncvt_rtz_x_f_w_i8mf4 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1057,7 +1057,7 @@ vint8mf2_t test_vfncvt_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1066,7 +1066,7 @@ vint8mf2_t test_vfncvt_rtz_x_f_w_i8mf2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1075,7 +1075,7 @@ vint8m1_t test_vfncvt_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1084,7 +1084,7 @@ vint8m1_t test_vfncvt_rtz_x_f_w_i8m1 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1093,7 +1093,7 @@ vint8m2_t test_vfncvt_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1102,7 +1102,7 @@ vint8m2_t test_vfncvt_rtz_x_f_w_i8m2 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_x_f_w_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1111,7 +1111,7 @@ vint8m4_t test_vfncvt_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_x_f_w_i8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1120,7 +1120,7 @@ vint8m4_t test_vfncvt_rtz_x_f_w_i8m4 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1129,7 +1129,7 @@ vuint8mf8_t test_vfncvt_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16.i64(<vscale x 1 x i8> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1138,7 +1138,7 @@ vuint8mf8_t test_vfncvt_rtz_xu_f_w_u8mf8 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1147,7 +1147,7 @@ vuint8mf4_t test_vfncvt_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv2i8.nxv2f16.i64(<vscale x 2 x i8> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1156,7 +1156,7 @@ vuint8mf4_t test_vfncvt_rtz_xu_f_w_u8mf4 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1165,7 +1165,7 @@ vuint8mf2_t test_vfncvt_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv4i8.nxv4f16.i64(<vscale x 4 x i8> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1174,7 +1174,7 @@ vuint8mf2_t test_vfncvt_rtz_xu_f_w_u8mf2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1183,7 +1183,7 @@ vuint8m1_t test_vfncvt_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv8i8.nxv8f16.i64(<vscale x 8 x i8> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1192,7 +1192,7 @@ vuint8m1_t test_vfncvt_rtz_xu_f_w_u8m1 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1201,7 +1201,7 @@ vuint8m2_t test_vfncvt_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv16i8.nxv16f16.i64(<vscale x 16 x i8> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1210,7 +1210,7 @@ vuint8m2_t test_vfncvt_rtz_xu_f_w_u8m2 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_xu_f_w_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1219,7 +1219,7 @@ vuint8m4_t test_vfncvt_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rtz_xu_f_w_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv32i8.nxv32f16.i64(<vscale x 32 x i8> undef, <vscale x 32 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
|
||||
|
@ -1228,7 +1228,7 @@ vuint8m4_t test_vfncvt_rtz_xu_f_w_u8m4 (vfloat16m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) {
|
||||
|
@ -1237,7 +1237,7 @@ vfloat16mf4_t test_vfncvt_f_x_w_f16mf4 (vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.x.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) {
|
||||
|
@ -1246,7 +1246,7 @@ vfloat16mf2_t test_vfncvt_f_x_w_f16mf2 (vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.x.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) {
|
||||
|
@ -1255,7 +1255,7 @@ vfloat16m1_t test_vfncvt_f_x_w_f16m1 (vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.x.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) {
|
||||
|
@ -1264,7 +1264,7 @@ vfloat16m2_t test_vfncvt_f_x_w_f16m2 (vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_x_w_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.x.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) {
|
||||
|
@ -1273,7 +1273,7 @@ vfloat16m4_t test_vfncvt_f_x_w_f16m4 (vint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32.i64(<vscale x 1 x half> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) {
|
||||
|
@ -1282,7 +1282,7 @@ vfloat16mf4_t test_vfncvt_f_xu_w_f16mf4 (vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.xu.w.nxv2f16.nxv2i32.i64(<vscale x 2 x half> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) {
|
||||
|
@ -1291,7 +1291,7 @@ vfloat16mf2_t test_vfncvt_f_xu_w_f16mf2 (vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.xu.w.nxv4f16.nxv4i32.i64(<vscale x 4 x half> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) {
|
||||
|
@ -1300,7 +1300,7 @@ vfloat16m1_t test_vfncvt_f_xu_w_f16m1 (vuint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.xu.w.nxv8f16.nxv8i32.i64(<vscale x 8 x half> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) {
|
||||
|
@ -1309,7 +1309,7 @@ vfloat16m2_t test_vfncvt_f_xu_w_f16m2 (vuint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_xu_w_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.xu.w.nxv16f16.nxv16i32.i64(<vscale x 16 x half> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) {
|
||||
|
@ -1318,7 +1318,7 @@ vfloat16m4_t test_vfncvt_f_xu_w_f16m4 (vuint32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -1327,7 +1327,7 @@ vfloat16mf4_t test_vfncvt_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32.i64(<vscale x 1 x half> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -1336,7 +1336,7 @@ vfloat16mf4_t test_vfncvt_rod_f_f_w_f16mf4 (vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
|
||||
|
@ -1345,7 +1345,7 @@ vfloat16mf2_t test_vfncvt_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv2f16.nxv2f32.i64(<vscale x 2 x half> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
|
||||
|
@ -1354,7 +1354,7 @@ vfloat16mf2_t test_vfncvt_rod_f_f_w_f16mf2 (vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
|
||||
|
@ -1363,7 +1363,7 @@ vfloat16m1_t test_vfncvt_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv4f16.nxv4f32.i64(<vscale x 4 x half> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
|
||||
|
@ -1372,7 +1372,7 @@ vfloat16m1_t test_vfncvt_rod_f_f_w_f16m1 (vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
|
||||
|
@ -1381,7 +1381,7 @@ vfloat16m2_t test_vfncvt_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv8f16.nxv8f32.i64(<vscale x 8 x half> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
|
||||
|
@ -1390,7 +1390,7 @@ vfloat16m2_t test_vfncvt_rod_f_f_w_f16m2 (vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_f_f_w_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
|
||||
|
@ -1399,7 +1399,7 @@ vfloat16m4_t test_vfncvt_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfncvt_rod_f_f_w_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv16f16.nxv16f32.i64(<vscale x 16 x half> undef, <vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfncvt_rod_f_f_w_f16m4 (vfloat32m8_t src, size_t vl) {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrec7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
|
||||
|
@ -26,7 +26,7 @@ vfloat32m1_t test_vfrec7_v_f32m1(vfloat32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
|
||||
|
@ -35,7 +35,7 @@ vfloat32m2_t test_vfrec7_v_f32m2(vfloat32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
|
||||
|
@ -44,7 +44,7 @@ vfloat32m4_t test_vfrec7_v_f32m4(vfloat32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
|
||||
|
@ -53,7 +53,7 @@ vfloat32m8_t test_vfrec7_v_f32m8(vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -62,7 +62,7 @@ vfloat64m1_t test_vfrec7_v_f64m1(vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -71,7 +71,7 @@ vfloat64m2_t test_vfrec7_v_f64m2(vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -80,7 +80,7 @@ vfloat64m4_t test_vfrec7_v_f64m4(vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfrec7_v_f64m8(vfloat64m8_t op1, size_t vl) {
|
||||
|
@ -179,7 +179,7 @@ vfloat64m8_t test_vfrec7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrec7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrec7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vfloat16m1_t test_vfrec7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vfloat16m2_t test_vfrec7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
|
||||
|
@ -224,7 +224,7 @@ vfloat16m4_t test_vfrec7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrec7_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfrec7_v_f16m8 (vfloat16m8_t op1, size_t vl) {
|
||||
|
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
|
||||
|
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfrsqrt7_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
|
||||
|
@ -26,7 +26,7 @@ vfloat32m1_t test_vfrsqrt7_v_f32m1(vfloat32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
|
||||
|
@ -35,7 +35,7 @@ vfloat32m2_t test_vfrsqrt7_v_f32m2(vfloat32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
|
||||
|
@ -44,7 +44,7 @@ vfloat32m4_t test_vfrsqrt7_v_f32m4(vfloat32m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
|
||||
|
@ -53,7 +53,7 @@ vfloat32m8_t test_vfrsqrt7_v_f32m8(vfloat32m8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
|
||||
|
@ -62,7 +62,7 @@ vfloat64m1_t test_vfrsqrt7_v_f64m1(vfloat64m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
|
||||
|
@ -71,7 +71,7 @@ vfloat64m2_t test_vfrsqrt7_v_f64m2(vfloat64m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
|
||||
|
@ -80,7 +80,7 @@ vfloat64m4_t test_vfrsqrt7_v_f64m4(vfloat64m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfrsqrt7_v_f64m8(vfloat64m8_t op1, size_t vl) {
|
||||
|
@ -179,7 +179,7 @@ vfloat64m8_t test_vfrsqrt7_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vfloat16mf4_t test_vfrsqrt7_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vfloat16mf2_t test_vfrsqrt7_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vfloat16m1_t test_vfrsqrt7_v_f16m1 (vfloat16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vfloat16m2_t test_vfrsqrt7_v_f16m2 (vfloat16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
|
||||
|
@ -224,7 +224,7 @@ vfloat16m4_t test_vfrsqrt7_v_f16m4 (vfloat16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfrsqrt7_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfrsqrt7_v_f16m8 (vfloat16m8_t op1, size_t vl) {
|
||||
|
|
|
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32.i64(<vscale x 1 x float> undef, <vscale x 1 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfsqrt_v_f32mf2(vfloat32mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32.i64(<vscale x 2 x float> undef, <vscale x 2 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfsqrt_v_f32m1(vfloat32m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32.i64(<vscale x 4 x float> undef, <vscale x 4 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfsqrt_v_f32m2(vfloat32m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32.i64(<vscale x 8 x float> undef, <vscale x 8 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfsqrt_v_f32m4(vfloat32m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfsqrt_v_f32m8(vfloat32m8_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64.i64(<vscale x 1 x double> undef, <vscale x 1 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
@@ -62,7 +62,7 @@ vfloat64m1_t test_vfsqrt_v_f64m1(vfloat64m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64.i64(<vscale x 2 x double> undef, <vscale x 2 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
@@ -71,7 +71,7 @@ vfloat64m2_t test_vfsqrt_v_f64m2(vfloat64m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64.i64(<vscale x 4 x double> undef, <vscale x 4 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
@@ -80,7 +80,7 @@ vfloat64m4_t test_vfsqrt_v_f64m4(vfloat64m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64.i64(<vscale x 8 x double> undef, <vscale x 8 x double> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) {
@@ -179,7 +179,7 @@ vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16.i64(<vscale x 1 x half> undef, <vscale x 1 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
@@ -188,7 +188,7 @@ vfloat16mf4_t test_vfsqrt_v_f16mf4 (vfloat16mf4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16.i64(<vscale x 2 x half> undef, <vscale x 2 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
@@ -197,7 +197,7 @@ vfloat16mf2_t test_vfsqrt_v_f16mf2 (vfloat16mf2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16.i64(<vscale x 4 x half> undef, <vscale x 4 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) {
@@ -206,7 +206,7 @@ vfloat16m1_t test_vfsqrt_v_f16m1 (vfloat16m1_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16.i64(<vscale x 8 x half> undef, <vscale x 8 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) {
@@ -215,7 +215,7 @@ vfloat16m2_t test_vfsqrt_v_f16m2 (vfloat16m2_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16.i64(<vscale x 16 x half> undef, <vscale x 16 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) {
@@ -224,7 +224,7 @@ vfloat16m4_t test_vfsqrt_v_f16m4 (vfloat16m4_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vfsqrt_v_f16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16.i64(<vscale x 32 x half> undef, <vscale x 32 x half> [[OP1:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfsqrt_v_f16m8 (vfloat16m8_t op1, size_t vl) {
@@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfwcvt_f_x_v_f32mf2(vint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
@@ -26,7 +26,7 @@ vfloat32m1_t test_vfwcvt_f_x_v_f32m1(vint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
@@ -35,7 +35,7 @@ vfloat32m2_t test_vfwcvt_f_x_v_f32m2(vint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
@@ -44,7 +44,7 @@ vfloat32m4_t test_vfwcvt_f_x_v_f32m4(vint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
@@ -53,7 +53,7 @@ vfloat32m8_t test_vfwcvt_f_x_v_f32m8(vint16m4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16.i64(<vscale x 1 x float> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
@@ -62,7 +62,7 @@ vfloat32mf2_t test_vfwcvt_f_xu_v_f32mf2(vuint16mf4_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16.i64(<vscale x 2 x float> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
@@ -71,7 +71,7 @@ vfloat32m1_t test_vfwcvt_f_xu_v_f32m1(vuint16mf2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16.i64(<vscale x 4 x float> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
@@ -80,7 +80,7 @@ vfloat32m2_t test_vfwcvt_f_xu_v_f32m2(vuint16m1_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16.i64(<vscale x 8 x float> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
@@ -89,7 +89,7 @@ vfloat32m4_t test_vfwcvt_f_xu_v_f32m4(vuint16m2_t src, size_t vl) {
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16.i64(<vscale x 16 x float> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
@ -98,7 +98,7 @@ vfloat32m8_t test_vfwcvt_f_xu_v_f32m8(vuint16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -107,7 +107,7 @@ vint64m1_t test_vfwcvt_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -116,7 +116,7 @@ vint64m1_t test_vfwcvt_rtz_x_f_v_i64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -125,7 +125,7 @@ vint64m2_t test_vfwcvt_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -134,7 +134,7 @@ vint64m2_t test_vfwcvt_rtz_x_f_v_i64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -143,7 +143,7 @@ vint64m4_t test_vfwcvt_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -152,7 +152,7 @@ vint64m4_t test_vfwcvt_rtz_x_f_v_i64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -161,7 +161,7 @@ vint64m8_t test_vfwcvt_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -170,7 +170,7 @@ vint64m8_t test_vfwcvt_rtz_x_f_v_i64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -179,7 +179,7 @@ vuint64m1_t test_vfwcvt_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32.i64(<vscale x 1 x i64> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -188,7 +188,7 @@ vuint64m1_t test_vfwcvt_rtz_xu_f_v_u64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -197,7 +197,7 @@ vuint64m2_t test_vfwcvt_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32.i64(<vscale x 2 x i64> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -206,7 +206,7 @@ vuint64m2_t test_vfwcvt_rtz_xu_f_v_u64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -215,7 +215,7 @@ vuint64m4_t test_vfwcvt_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32.i64(<vscale x 4 x i64> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -224,7 +224,7 @@ vuint64m4_t test_vfwcvt_rtz_xu_f_v_u64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -233,7 +233,7 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32.i64(<vscale x 8 x i64> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -242,7 +242,7 @@ vuint64m8_t test_vfwcvt_rtz_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
|
||||
|
@ -251,7 +251,7 @@ vfloat64m1_t test_vfwcvt_f_x_v_f64m1(vint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
|
||||
|
@ -260,7 +260,7 @@ vfloat64m2_t test_vfwcvt_f_x_v_f64m2(vint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
|
||||
|
@ -269,7 +269,7 @@ vfloat64m4_t test_vfwcvt_f_x_v_f64m4(vint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
|
||||
|
@ -278,7 +278,7 @@ vfloat64m8_t test_vfwcvt_f_x_v_f64m8(vint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32.i64(<vscale x 1 x double> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
|
||||
|
@ -287,7 +287,7 @@ vfloat64m1_t test_vfwcvt_f_xu_v_f64m1(vuint32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32.i64(<vscale x 2 x double> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
|
||||
|
@ -296,7 +296,7 @@ vfloat64m2_t test_vfwcvt_f_xu_v_f64m2(vuint32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32.i64(<vscale x 4 x double> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
|
||||
|
@ -305,7 +305,7 @@ vfloat64m4_t test_vfwcvt_f_xu_v_f64m4(vuint32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32.i64(<vscale x 8 x double> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
|
||||
|
@ -314,7 +314,7 @@ vfloat64m8_t test_vfwcvt_f_xu_v_f64m8(vuint32m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32.i64(<vscale x 1 x double> undef, <vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
|
||||
|
@ -323,7 +323,7 @@ vfloat64m1_t test_vfwcvt_f_f_v_f64m1(vfloat32mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32.i64(<vscale x 2 x double> undef, <vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
|
||||
|
@ -332,7 +332,7 @@ vfloat64m2_t test_vfwcvt_f_f_v_f64m2(vfloat32m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32.i64(<vscale x 4 x double> undef, <vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
|
||||
|
@ -341,7 +341,7 @@ vfloat64m4_t test_vfwcvt_f_f_v_f64m4(vfloat32m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32.i64(<vscale x 8 x double> undef, <vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
|
||||
//
|
||||
vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) {
|
||||
|
@ -735,7 +735,7 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff,
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) {
|
||||
|
@ -744,7 +744,7 @@ vfloat16mf4_t test_vfwcvt_f_x_v_f16mf4 (vint8mf8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) {
|
||||
|
@ -753,7 +753,7 @@ vfloat16mf2_t test_vfwcvt_f_x_v_f16mf2 (vint8mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) {
|
||||
|
@ -762,7 +762,7 @@ vfloat16m1_t test_vfwcvt_f_x_v_f16m1 (vint8mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) {
|
||||
|
@ -771,7 +771,7 @@ vfloat16m2_t test_vfwcvt_f_x_v_f16m2 (vint8m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) {
|
||||
|
@ -780,7 +780,7 @@ vfloat16m4_t test_vfwcvt_f_x_v_f16m4 (vint8m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_x_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) {
|
||||
|
@ -789,7 +789,7 @@ vfloat16m8_t test_vfwcvt_f_x_v_f16m8 (vint8m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8.i64(<vscale x 1 x half> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) {
|
||||
|
@ -798,7 +798,7 @@ vfloat16mf4_t test_vfwcvt_f_xu_v_f16mf4 (vuint8mf8_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8.i64(<vscale x 2 x half> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) {
|
||||
|
@ -807,7 +807,7 @@ vfloat16mf2_t test_vfwcvt_f_xu_v_f16mf2 (vuint8mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8.i64(<vscale x 4 x half> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) {
|
||||
|
@ -816,7 +816,7 @@ vfloat16m1_t test_vfwcvt_f_xu_v_f16m1 (vuint8mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8.i64(<vscale x 8 x half> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) {
|
||||
|
@ -825,7 +825,7 @@ vfloat16m2_t test_vfwcvt_f_xu_v_f16m2 (vuint8m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8.i64(<vscale x 16 x half> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) {
|
||||
|
@ -834,7 +834,7 @@ vfloat16m4_t test_vfwcvt_f_xu_v_f16m4 (vuint8m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_xu_v_f16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8.i64(<vscale x 32 x half> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
|
||||
//
|
||||
vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) {
|
||||
|
@ -843,7 +843,7 @@ vfloat16m8_t test_vfwcvt_f_xu_v_f16m8 (vuint8m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -852,7 +852,7 @@ vint32mf2_t test_vfwcvt_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -861,7 +861,7 @@ vint32mf2_t test_vfwcvt_rtz_x_f_v_i32mf2 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -870,7 +870,7 @@ vint32m1_t test_vfwcvt_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -879,7 +879,7 @@ vint32m1_t test_vfwcvt_rtz_x_f_v_i32m1 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -888,7 +888,7 @@ vint32m2_t test_vfwcvt_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -897,7 +897,7 @@ vint32m2_t test_vfwcvt_rtz_x_f_v_i32m2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -906,7 +906,7 @@ vint32m4_t test_vfwcvt_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -915,7 +915,7 @@ vint32m4_t test_vfwcvt_rtz_x_f_v_i32m4 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_x_f_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -924,7 +924,7 @@ vint32m8_t test_vfwcvt_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_x_f_v_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -933,7 +933,7 @@ vint32m8_t test_vfwcvt_rtz_x_f_v_i32m8 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -942,7 +942,7 @@ vuint32mf2_t test_vfwcvt_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16.i64(<vscale x 1 x i32> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -951,7 +951,7 @@ vuint32mf2_t test_vfwcvt_rtz_xu_f_v_u32mf2 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -960,7 +960,7 @@ vuint32m1_t test_vfwcvt_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16.i64(<vscale x 2 x i32> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -969,7 +969,7 @@ vuint32m1_t test_vfwcvt_rtz_xu_f_v_u32m1 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -978,7 +978,7 @@ vuint32m2_t test_vfwcvt_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16.i64(<vscale x 4 x i32> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -987,7 +987,7 @@ vuint32m2_t test_vfwcvt_rtz_xu_f_v_u32m2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -996,7 +996,7 @@ vuint32m4_t test_vfwcvt_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16.i64(<vscale x 8 x i32> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1005,7 +1005,7 @@ vuint32m4_t test_vfwcvt_rtz_xu_f_v_u32m4 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_xu_f_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1014,7 +1014,7 @@ vuint32m8_t test_vfwcvt_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_rtz_xu_f_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16.i64(<vscale x 16 x i32> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
|
||||
|
@ -1023,7 +1023,7 @@ vuint32m8_t test_vfwcvt_rtz_xu_f_v_u32m8 (vfloat16m4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16.i64(<vscale x 1 x float> undef, <vscale x 1 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) {
|
||||
|
@ -1032,7 +1032,7 @@ vfloat32mf2_t test_vfwcvt_f_f_v_f32mf2 (vfloat16mf4_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16.i64(<vscale x 2 x float> undef, <vscale x 2 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) {
|
||||
|
@ -1041,7 +1041,7 @@ vfloat32m1_t test_vfwcvt_f_f_v_f32m1 (vfloat16mf2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16.i64(<vscale x 4 x float> undef, <vscale x 4 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) {
|
||||
|
@ -1050,7 +1050,7 @@ vfloat32m2_t test_vfwcvt_f_f_v_f32m2 (vfloat16m1_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16.i64(<vscale x 8 x float> undef, <vscale x 8 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) {
|
||||
|
@ -1059,7 +1059,7 @@ vfloat32m4_t test_vfwcvt_f_f_v_f32m4 (vfloat16m2_t src, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vfwcvt_f_f_v_f32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16.i64(<vscale x 16 x float> undef, <vscale x 16 x half> [[SRC:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
|
||||
//
|
||||
vfloat32m8_t test_vfwcvt_f_f_v_f32m8 (vfloat16m4_t src, size_t vl) {
|
||||
|
|
|
@ -6,154 +6,154 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8.i64(<vscale x 1 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_vid_v_u8mf8(size_t vl) { return vid_v_u8mf8(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8.i64(<vscale x 2 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_vid_v_u8mf4(size_t vl) { return vid_v_u8mf4(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8.i64(<vscale x 4 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_vid_v_u8mf2(size_t vl) { return vid_v_u8mf2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8.i64(<vscale x 8 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_vid_v_u8m1(size_t vl) { return vid_v_u8m1(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8.i64(<vscale x 16 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_vid_v_u8m2(size_t vl) { return vid_v_u8m2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8.i64(<vscale x 32 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_vid_v_u8m4(size_t vl) { return vid_v_u8m4(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vid.nxv64i8.i64(<vscale x 64 x i8> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_vid_v_u8m8(size_t vl) { return vid_v_u8m8(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16.i64(<vscale x 1 x i16> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vid_v_u16mf4(size_t vl) { return vid_v_u16mf4(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16.i64(<vscale x 2 x i16> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vid_v_u16mf2(size_t vl) { return vid_v_u16mf2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16.i64(<vscale x 4 x i16> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vid_v_u16m1(size_t vl) { return vid_v_u16m1(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16.i64(<vscale x 8 x i16> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vid_v_u16m2(size_t vl) { return vid_v_u16m2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16.i64(<vscale x 16 x i16> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vid_v_u16m4(size_t vl) { return vid_v_u16m4(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16.i64(<vscale x 32 x i16> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vid_v_u16m8(size_t vl) { return vid_v_u16m8(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32.i64(<vscale x 1 x i32> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vid_v_u32mf2(size_t vl) { return vid_v_u32mf2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32.i64(<vscale x 2 x i32> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vid_v_u32m1(size_t vl) { return vid_v_u32m1(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32.i64(<vscale x 4 x i32> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vid_v_u32m2(size_t vl) { return vid_v_u32m2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32.i64(<vscale x 8 x i32> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vid_v_u32m4(size_t vl) { return vid_v_u32m4(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32.i64(<vscale x 16 x i32> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vid_v_u32m8(size_t vl) { return vid_v_u32m8(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64.i64(<vscale x 1 x i64> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vid_v_u64m1(size_t vl) { return vid_v_u64m1(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64.i64(<vscale x 2 x i64> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vid_v_u64m2(size_t vl) { return vid_v_u64m2(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64.i64(<vscale x 4 x i64> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vid_v_u64m4(size_t vl) { return vid_v_u64m4(vl); }
|
||||
|
||||
// CHECK-RV64-LABEL: @test_vid_v_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64.i64(<vscale x 8 x i64> undef, i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vid_v_u64m8(size_t vl) { return vid_v_u64m8(vl); }
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8mf8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
|
||||
|
@ -15,7 +15,7 @@ vuint8mf8_t test_viota_m_u8mf8(vbool64_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
|
||||
|
@ -24,7 +24,7 @@ vuint8mf4_t test_viota_m_u8mf4(vbool32_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
|
||||
|
@ -33,7 +33,7 @@ vuint8mf2_t test_viota_m_u8mf2(vbool16_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
|
||||
|
@ -42,7 +42,7 @@ vuint8m1_t test_viota_m_u8m1(vbool8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
|
||||
|
@ -51,7 +51,7 @@ vuint8m2_t test_viota_m_u8m2(vbool4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
|
||||
|
@ -60,7 +60,7 @@ vuint8m4_t test_viota_m_u8m4(vbool2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u8m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
|
||||
//
|
||||
vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
|
||||
|
@ -69,7 +69,7 @@ vuint8m8_t test_viota_m_u8m8(vbool1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
|
||||
|
@ -78,7 +78,7 @@ vuint16mf4_t test_viota_m_u16mf4(vbool64_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
|
||||
|
@ -87,7 +87,7 @@ vuint16mf2_t test_viota_m_u16mf2(vbool32_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
|
||||
|
@ -96,7 +96,7 @@ vuint16m1_t test_viota_m_u16m1(vbool16_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
|
||||
|
@ -105,7 +105,7 @@ vuint16m2_t test_viota_m_u16m2(vbool8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
|
||||
|
@ -114,7 +114,7 @@ vuint16m4_t test_viota_m_u16m4(vbool4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vuint16m8_t test_viota_m_u16m8(vbool2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vuint32mf2_t test_viota_m_u32mf2(vbool64_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vuint32m1_t test_viota_m_u32m1(vbool32_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vuint32m2_t test_viota_m_u32m2(vbool16_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vuint32m4_t test_viota_m_u32m4(vbool8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vuint32m8_t test_viota_m_u32m8(vbool4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vuint64m1_t test_viota_m_u64m1(vbool64_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
|
||||
|
@ -186,7 +186,7 @@ vuint64m2_t test_viota_m_u64m2(vbool32_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
|
||||
|
@ -195,7 +195,7 @@ vuint64m4_t test_viota_m_u64m4(vbool16_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_viota_m_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i1> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) {
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
|
||||
|
@ -15,7 +15,7 @@ vint16mf4_t test_vsext_vf2_i16mf4(vint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
|
||||
|
@ -24,7 +24,7 @@ vint16mf2_t test_vsext_vf2_i16mf2(vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
|
||||
|
@ -33,7 +33,7 @@ vint16m1_t test_vsext_vf2_i16m1(vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
|
||||
|
@ -42,7 +42,7 @@ vint16m2_t test_vsext_vf2_i16m2(vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
|
||||
|
@ -51,7 +51,7 @@ vint16m4_t test_vsext_vf2_i16m4(vint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
|
||||
|
@ -60,7 +60,7 @@ vint16m8_t test_vsext_vf2_i16m8(vint8m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
|
||||
|
@ -69,7 +69,7 @@ vint32mf2_t test_vsext_vf4_i32mf2(vint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
|
||||
|
@ -78,7 +78,7 @@ vint32m1_t test_vsext_vf4_i32m1(vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
|
||||
|
@ -87,7 +87,7 @@ vint32m2_t test_vsext_vf4_i32m2(vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
|
||||
|
@ -96,7 +96,7 @@ vint32m4_t test_vsext_vf4_i32m4(vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
|
||||
|
@ -105,7 +105,7 @@ vint32m8_t test_vsext_vf4_i32m8(vint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
|
||||
|
@ -114,7 +114,7 @@ vint64m1_t test_vsext_vf8_i64m1(vint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vint64m2_t test_vsext_vf8_i64m2(vint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vint64m4_t test_vsext_vf8_i64m4(vint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf8_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vint64m8_t test_vsext_vf8_i64m8(vint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vint32mf2_t test_vsext_vf2_i32mf2(vint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vint32m1_t test_vsext_vf2_i32m1(vint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vint32m2_t test_vsext_vf2_i32m2(vint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vint32m4_t test_vsext_vf2_i32m4(vint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
|
||||
|
@ -186,7 +186,7 @@ vint32m8_t test_vsext_vf2_i32m8(vint16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
|
||||
|
@ -195,7 +195,7 @@ vint64m1_t test_vsext_vf4_i64m1(vint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
|
||||
|
@ -204,7 +204,7 @@ vint64m2_t test_vsext_vf4_i64m2(vint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
|
||||
|
@ -213,7 +213,7 @@ vint64m4_t test_vsext_vf4_i64m4(vint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf4_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
|
||||
|
@ -222,7 +222,7 @@ vint64m8_t test_vsext_vf4_i64m8(vint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
|
||||
|
@ -231,7 +231,7 @@ vint64m1_t test_vsext_vf2_i64m1(vint32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
|
||||
|
@ -240,7 +240,7 @@ vint64m2_t test_vsext_vf2_i64m2(vint32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
|
||||
|
@ -249,7 +249,7 @@ vint64m4_t test_vsext_vf2_i64m4(vint32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vsext_vf2_i64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vint64m8_t test_vsext_vf2_i64m8(vint32m4_t op1, size_t vl) {
|
||||
|
|
|
@ -6,7 +6,7 @@
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
|
||||
|
@ -15,7 +15,7 @@ vuint16mf4_t test_vzext_vf2_u16mf4(vuint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -24,7 +24,7 @@ vuint16mf2_t test_vzext_vf2_u16mf2(vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -33,7 +33,7 @@ vuint16m1_t test_vzext_vf2_u16m1(vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
|
||||
|
@ -42,7 +42,7 @@ vuint16m2_t test_vzext_vf2_u16m2(vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
|
||||
|
@ -51,7 +51,7 @@ vuint16m4_t test_vzext_vf2_u16m4(vuint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u16m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> undef, <vscale x 32 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
|
||||
//
|
||||
vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
|
||||
|
@ -60,7 +60,7 @@ vuint16m8_t test_vzext_vf2_u16m8(vuint8m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8.i64(<vscale x 1 x i32> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
|
||||
|
@ -69,7 +69,7 @@ vuint32mf2_t test_vzext_vf4_u32mf2(vuint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8.i64(<vscale x 2 x i32> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -78,7 +78,7 @@ vuint32m1_t test_vzext_vf4_u32m1(vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8.i64(<vscale x 4 x i32> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -87,7 +87,7 @@ vuint32m2_t test_vzext_vf4_u32m2(vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8.i64(<vscale x 8 x i32> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
|
||||
|
@ -96,7 +96,7 @@ vuint32m4_t test_vzext_vf4_u32m4(vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8.i64(<vscale x 16 x i32> undef, <vscale x 16 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
|
||||
|
@ -105,7 +105,7 @@ vuint32m8_t test_vzext_vf4_u32m8(vuint8m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8.i64(<vscale x 1 x i64> undef, <vscale x 1 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
|
||||
|
@ -114,7 +114,7 @@ vuint64m1_t test_vzext_vf8_u64m1(vuint8mf8_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8.i64(<vscale x 2 x i64> undef, <vscale x 2 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
|
||||
|
@ -123,7 +123,7 @@ vuint64m2_t test_vzext_vf8_u64m2(vuint8mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8.i64(<vscale x 4 x i64> undef, <vscale x 4 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
|
||||
|
@ -132,7 +132,7 @@ vuint64m4_t test_vzext_vf8_u64m4(vuint8mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf8_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8.i64(<vscale x 8 x i64> undef, <vscale x 8 x i8> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
|
||||
|
@ -141,7 +141,7 @@ vuint64m8_t test_vzext_vf8_u64m8(vuint8m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32mf2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
|
||||
|
@ -150,7 +150,7 @@ vuint32mf2_t test_vzext_vf2_u32mf2(vuint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
|
||||
|
@ -159,7 +159,7 @@ vuint32m1_t test_vzext_vf2_u32m1(vuint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
|
||||
|
@ -168,7 +168,7 @@ vuint32m2_t test_vzext_vf2_u32m2(vuint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
|
||||
|
@ -177,7 +177,7 @@ vuint32m4_t test_vzext_vf2_u32m4(vuint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u32m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> undef, <vscale x 16 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
|
||||
//
|
||||
vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
|
||||
|
@ -186,7 +186,7 @@ vuint32m8_t test_vzext_vf2_u32m8(vuint16m4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16.i64(<vscale x 1 x i64> undef, <vscale x 1 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
|
||||
|
@ -195,7 +195,7 @@ vuint64m1_t test_vzext_vf4_u64m1(vuint16mf4_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16.i64(<vscale x 2 x i64> undef, <vscale x 2 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
|
||||
|
@ -204,7 +204,7 @@ vuint64m2_t test_vzext_vf4_u64m2(vuint16mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16.i64(<vscale x 4 x i64> undef, <vscale x 4 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
|
||||
|
@ -213,7 +213,7 @@ vuint64m4_t test_vzext_vf4_u64m4(vuint16m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf4_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16.i64(<vscale x 8 x i64> undef, <vscale x 8 x i16> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
|
||||
|
@ -222,7 +222,7 @@ vuint64m8_t test_vzext_vf4_u64m8(vuint16m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m1(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
|
||||
|
@ -231,7 +231,7 @@ vuint64m1_t test_vzext_vf2_u64m1(vuint32mf2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m2(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> undef, <vscale x 2 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
|
||||
|
@ -240,7 +240,7 @@ vuint64m2_t test_vzext_vf2_u64m2(vuint32m1_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m4(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> undef, <vscale x 4 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
|
||||
|
@ -249,7 +249,7 @@ vuint64m4_t test_vzext_vf2_u64m4(vuint32m2_t op1, size_t vl) {
|
|||
|
||||
// CHECK-RV64-LABEL: @test_vzext_vf2_u64m8(
|
||||
// CHECK-RV64-NEXT: entry:
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> undef, <vscale x 8 x i32> [[OP1:%.*]], i64 [[VL:%.*]])
|
||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||
//
|
||||
vuint64m8_t test_vzext_vf2_u64m8(vuint32m4_t op1, size_t vl) {
@ -350,12 +350,12 @@ let TargetPrefix = "riscv" in {
|
|||
let VLOperand = 4;
|
||||
}
|
||||
// For destination vector type is the same as source vector.
|
||||
// Input: (vector_in, vl)
|
||||
// Input: (passthru, vector_in, vl)
|
||||
class RISCVUnaryAANoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
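For readers following the signature change in RISCVUnaryAANoMask, here is a minimal IR-level sketch of how the extra operand is used. It reuses the vfsqrt declaration that appears in the updated tests further down in this diff, with iXLen spelled concretely as i64 (the RV64 run line); the wrapper function names are invented for illustration, and the tail-agnostic expectation for the undef case is the intent of the patch rather than something checked in this hunk.

; Same-width unary intrinsic after this change: operand 0 is the passthru.
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
  <vscale x 1 x half>,   ; passthru / merge value
  <vscale x 1 x half>,   ; source vector
  i64)                   ; vl

define <vscale x 1 x half> @sqrt_tail_agnostic(<vscale x 1 x half> %src, i64 %vl) {
  ; undef passthru: no tail elements need to be preserved
  %r = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
              <vscale x 1 x half> undef, <vscale x 1 x half> %src, i64 %vl)
  ret <vscale x 1 x half> %r
}

define <vscale x 1 x half> @sqrt_tail_undisturbed(<vscale x 1 x half> %merge,
                                                  <vscale x 1 x half> %src, i64 %vl) {
  ; register passthru: the form the updated tests below exercise, which check a "tu" vsetvli
  %r = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
              <vscale x 1 x half> %merge, <vscale x 1 x half> %src, i64 %vl)
  ret <vscale x 1 x half> %r
}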
|
||||
// For destination vector type is the same as first source vector (with mask).
|
||||
// Input: (vector_in, mask, vl, ta)
|
||||
|
@ -367,7 +367,7 @@ let TargetPrefix = "riscv" in {
|
|||
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
class RISCVUnaryAAMaskNoTA
|
||||
class RISCVUnaryAAMaskTU
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
|
@ -583,21 +583,23 @@ let TargetPrefix = "riscv" in {
|
|||
}
|
||||
// For FP classify operations.
|
||||
// Output: (bit mask type output)
|
||||
// Input: (vector_in, vl)
|
||||
// Input: (passthru, vector_in, vl)
|
||||
class RISCVClassifyNoMask
|
||||
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For FP classify operations with mask.
|
||||
// Output: (bit mask type output)
|
||||
// Input: (maskedoff, vector_in, mask, vl)
|
||||
// Input: (maskedoff, vector_in, mask, vl, policy)
|
||||
class RISCVClassifyMask
|
||||
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
|
||||
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty, LLVMMatchType<1>],
|
||||
[IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
|
||||
let VLOperand = 3;
|
||||
}
|
||||
// For Saturating binary operations.
|
||||
|
@ -760,12 +762,12 @@ let TargetPrefix = "riscv" in {
|
|||
let VLOperand = 2;
|
||||
}
|
||||
// For destination vector type is NOT the same as source vector.
|
||||
// Input: (vector_in, vl)
|
||||
// Input: (passthru, vector_in, vl)
|
||||
class RISCVUnaryABNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
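As with the same-type case above, a short sketch of the widened (destination type differs from source) form; the declaration matches the vzext test added later in this diff, again with iXLen written as i64 and the wrapper function invented for illustration.

; Destination type differs from the source type; the passthru uses the destination type.
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,   ; passthru (destination element type)
  <vscale x 1 x i8>,    ; narrow source
  i64)                  ; vl

define <vscale x 1 x i64> @zext_vf8_tu(<vscale x 1 x i64> %merge,
                                       <vscale x 1 x i8> %src, i64 %vl) {
  ; non-undef passthru keeps the tail of %merge
  %r = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
              <vscale x 1 x i64> %merge, <vscale x 1 x i8> %src, i64 %vl)
  ret <vscale x 1 x i64> %r
}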
|
||||
// For destination vector type is NOT the same as source vector (with mask).
|
||||
// Input: (maskedoff, vector_in, mask, vl, ta)
|
||||
|
@ -800,18 +802,25 @@ let TargetPrefix = "riscv" in {
|
|||
// Input: (vl)
|
||||
class RISCVNullaryIntrinsic
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 0;
|
||||
[llvm_anyint_ty], [IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
// For Conversion unary operations.
|
||||
// Input: (vector_in, vl)
|
||||
class RISCVConversionNoMask
|
||||
// Output: (vector)
|
||||
// Input: (passthru, vl)
|
||||
class RISCVNullaryIntrinsicTU
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[LLVMMatchType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
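A nullary intrinsic now takes only the passthru and vl. The sketch below mirrors the vid declaration used in the new tests (i64 standing in for iXLen, wrapper name illustrative); the patterns added later in this diff match a register passthru like this to the new _TU pseudo.

declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
  <vscale x 1 x i8>,   ; passthru
  i64)                 ; vl

define <vscale x 1 x i8> @vid_keep_tail(<vscale x 1 x i8> %merge, i64 %vl) {
  ; a real passthru value requests tail-undisturbed behaviour
  %r = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(<vscale x 1 x i8> %merge, i64 %vl)
  ret <vscale x 1 x i8> %r
}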
|
||||
// For Conversion unary operations.
|
||||
// Input: (passthru, vector_in, vl)
|
||||
class RISCVConversionNoMask
|
||||
: Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// For Conversion unary operations with mask.
|
||||
// Input: (maskedoff, vector_in, mask, vl, ta)
|
||||
class RISCVConversionMask
|
||||
|
@ -1268,7 +1277,7 @@ let TargetPrefix = "riscv" in {
|
|||
let VLOperand = 1;
|
||||
}
|
||||
def int_riscv_vfmv_v_f : Intrinsic<[llvm_anyfloat_ty],
|
||||
[LLVMVectorElementType<0>, llvm_anyint_ty],
|
||||
[LLVMVectorElementType<0>, llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
}
|
||||
|
@ -1340,7 +1349,7 @@ let TargetPrefix = "riscv" in {
|
|||
defm vrgather_vx : RISCVRGatherVX;
|
||||
defm vrgatherei16_vv : RISCVRGatherEI16VV;
|
||||
|
||||
def "int_riscv_vcompress" : RISCVUnaryAAMaskNoTA;
|
||||
def "int_riscv_vcompress" : RISCVUnaryAAMaskTU;
|
||||
|
||||
defm vaaddu : RISCVSaturatingBinaryAAX;
|
||||
defm vaadd : RISCVSaturatingBinaryAAX;
|
||||
|
@ -1424,12 +1433,13 @@ let TargetPrefix = "riscv" in {
|
|||
defm vfncvt_rod_f_f_w : RISCVConversion;
|
||||
|
||||
// Output: (vector)
|
||||
// Input: (mask type input, vl)
|
||||
// Input: (passthru, mask type input, vl)
|
||||
def int_riscv_viota : Intrinsic<[llvm_anyvector_ty],
|
||||
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
[LLVMMatchType<0>,
|
||||
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
|
||||
llvm_anyint_ty],
|
||||
[IntrNoMem]>, RISCVVIntrinsic {
|
||||
let VLOperand = 1;
|
||||
let VLOperand = 2;
|
||||
}
|
||||
// Output: (vector)
|
||||
// Input: (maskedoff, mask type vector_in, mask, vl)
|
||||
|
@ -1442,8 +1452,8 @@ let TargetPrefix = "riscv" in {
|
|||
let VLOperand = 3;
|
||||
}
|
||||
// Output: (vector)
|
||||
// Input: (vl)
|
||||
def int_riscv_vid : RISCVNullaryIntrinsic;
|
||||
// Input: (passthru, vl)
|
||||
def int_riscv_vid : RISCVNullaryIntrinsicTU;
|
||||
|
||||
// Output: (vector)
|
||||
// Input: (maskedoff, mask, vl)
@ -872,6 +872,21 @@ class VPseudoNullaryNoMask<VReg RegClass>:
|
|||
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
|
||||
}
|
||||
|
||||
class VPseudoNullaryNoMaskTU<VReg RegClass>:
|
||||
Pseudo<(outs RegClass:$rd),
|
||||
(ins RegClass:$merge, AVL:$vl, ixlenimm:$sew),
|
||||
[]>, RISCVVPseudo {
|
||||
let mayLoad = 0;
|
||||
let mayStore = 0;
|
||||
let hasSideEffects = 0;
|
||||
let Constraints = "$rd = $merge";
|
||||
let HasVLOp = 1;
|
||||
let HasSEWOp = 1;
|
||||
let HasDummyMask = 1;
|
||||
let HasMergeOp = 1;
|
||||
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
|
||||
}
|
||||
|
||||
class VPseudoNullaryMask<VReg RegClass>:
|
||||
Pseudo<(outs GetVRegNoV0<RegClass>.R:$rd),
|
||||
(ins GetVRegNoV0<RegClass>.R:$merge, VMaskOp:$vm, AVL:$vl,
|
||||
|
@ -916,6 +931,22 @@ class VPseudoUnaryNoMask<DAGOperand RetClass, VReg OpClass, string Constraint =
|
|||
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
|
||||
}
|
||||
|
||||
// RetClass could be GPR or VReg.
|
||||
class VPseudoUnaryNoMaskTU<DAGOperand RetClass, VReg OpClass, string Constraint = ""> :
|
||||
Pseudo<(outs RetClass:$rd),
|
||||
(ins RetClass:$merge, OpClass:$rs2, AVL:$vl, ixlenimm:$sew), []>,
|
||||
RISCVVPseudo {
|
||||
let mayLoad = 0;
|
||||
let mayStore = 0;
|
||||
let hasSideEffects = 0;
|
||||
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
|
||||
let HasVLOp = 1;
|
||||
let HasSEWOp = 1;
|
||||
let HasDummyMask = 1;
|
||||
let HasMergeOp = 1;
|
||||
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
|
||||
}
|
||||
|
||||
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
|
||||
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
|
||||
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
|
||||
|
@ -1647,6 +1678,8 @@ multiclass VPseudoVID_V {
|
|||
let VLMul = m.value in {
|
||||
def "_V_" # m.MX : VPseudoNullaryNoMask<m.vrclass>,
|
||||
Sched<[WriteVMIdxV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_TU": VPseudoNullaryNoMaskTU<m.vrclass>,
|
||||
Sched<[WriteVMIdxV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_MASK" : VPseudoNullaryMask<m.vrclass>,
|
||||
Sched<[WriteVMIdxV, ReadVMask]>;
|
||||
}
|
||||
|
@ -1667,6 +1700,8 @@ multiclass VPseudoVIOT_M {
|
|||
let VLMul = m.value in {
|
||||
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, VR, constraint>,
|
||||
Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
|
||||
def "_" # m.MX # "_TU" : VPseudoUnaryNoMaskTU<m.vrclass, VR, constraint>,
|
||||
Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
|
||||
def "_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, VR, constraint>,
|
||||
Sched<[WriteVMIotV, ReadVMIotV, ReadVMask]>;
|
||||
}
|
||||
|
@ -1986,7 +2021,9 @@ multiclass VPseudoVCLS_V {
|
|||
let VLMul = m.value in {
|
||||
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMask<m.vrclass, m.vrclass>,
|
||||
def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFClassV, ReadVFClassV, ReadVMask]>;
|
||||
}
|
||||
}
|
||||
|
@ -1997,6 +2034,8 @@ multiclass VPseudoVSQR_V {
|
|||
let VLMul = m.value in {
|
||||
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFSqrtV, ReadVFSqrtV, ReadVMask]>;
|
||||
}
|
||||
|
@ -2008,6 +2047,8 @@ multiclass VPseudoVRCP_V {
|
|||
let VLMul = m.value in {
|
||||
def "_V_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
|
||||
def "_V_" # m.MX # "_MASK" : VPseudoUnaryMaskTA<m.vrclass, m.vrclass>,
|
||||
Sched<[WriteVFRecpV, ReadVFRecpV, ReadVMask]>;
|
||||
}
|
||||
|
@ -2021,6 +2062,8 @@ multiclass PseudoVEXT_VF2 {
|
|||
let VLMul = m.value in {
|
||||
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f2vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f2vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
def "_" # m.MX # "_MASK" :
|
||||
VPseudoUnaryMaskTA<m.vrclass, m.f2vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
|
@ -2035,6 +2078,8 @@ multiclass PseudoVEXT_VF4 {
|
|||
let VLMul = m.value in {
|
||||
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f4vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f4vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
def "_" # m.MX # "_MASK" :
|
||||
VPseudoUnaryMaskTA<m.vrclass, m.f4vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
|
@ -2049,6 +2094,8 @@ multiclass PseudoVEXT_VF8 {
|
|||
let VLMul = m.value in {
|
||||
def "_" # m.MX : VPseudoUnaryNoMask<m.vrclass, m.f8vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
def "_" # m.MX # "_TU": VPseudoUnaryNoMaskTU<m.vrclass, m.f8vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
def "_" # m.MX # "_MASK" :
|
||||
VPseudoUnaryMaskTA<m.vrclass, m.f8vrclass, constraints>,
|
||||
Sched<[WriteVExtV, ReadVExtV, ReadVMask]>;
|
||||
|
@ -2572,6 +2619,7 @@ multiclass VPseudoConversion<VReg RetClass,
|
|||
string Constraint = ""> {
|
||||
let VLMul = MInfo.value in {
|
||||
def "_" # MInfo.MX : VPseudoUnaryNoMask<RetClass, Op1Class, Constraint>;
|
||||
def "_" # MInfo.MX # "_TU": VPseudoUnaryNoMaskTU<RetClass, Op1Class, Constraint>;
|
||||
def "_" # MInfo.MX # "_MASK" : VPseudoUnaryMaskTA<RetClass, Op1Class,
|
||||
Constraint>;
|
||||
}
|
||||
|
@ -2773,12 +2821,31 @@ class VPatUnaryNoMask<string intrinsic_name,
|
|||
LMULInfo vlmul,
|
||||
VReg op2_reg_class> :
|
||||
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
|
||||
(result_type undef),
|
||||
(op2_type op2_reg_class:$rs2),
|
||||
VLOpFrag)),
|
||||
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
|
||||
(op2_type op2_reg_class:$rs2),
|
||||
GPR:$vl, sew)>;
|
||||
|
||||
class VPatUnaryNoMaskTU<string intrinsic_name,
|
||||
string inst,
|
||||
string kind,
|
||||
ValueType result_type,
|
||||
ValueType op2_type,
|
||||
int sew,
|
||||
LMULInfo vlmul,
|
||||
VReg result_reg_class,
|
||||
VReg op2_reg_class> :
|
||||
Pat<(result_type (!cast<Intrinsic>(intrinsic_name)
|
||||
(result_type result_reg_class:$merge),
|
||||
(op2_type op2_reg_class:$rs2),
|
||||
VLOpFrag)),
|
||||
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_TU")
|
||||
(result_type result_reg_class:$merge),
|
||||
(op2_type op2_reg_class:$rs2),
|
||||
GPR:$vl, sew)>;
|
||||
|
||||
class VPatUnaryMask<string intrinsic_name,
|
||||
string inst,
|
||||
string kind,
|
||||
|
@ -3189,6 +3256,8 @@ multiclass VPatUnaryV_M<string intrinsic, string instruction>
|
|||
foreach vti = AllIntegerVectors in {
|
||||
def : VPatUnaryNoMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
|
||||
vti.Log2SEW, vti.LMul, VR>;
|
||||
def : VPatUnaryNoMaskTU<intrinsic, instruction, "M", vti.Vector, vti.Mask,
|
||||
vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
|
||||
def : VPatUnaryMask<intrinsic, instruction, "M", vti.Vector, vti.Mask,
|
||||
vti.Mask, vti.Log2SEW, vti.LMul, vti.RegClass, VR>;
|
||||
}
|
||||
|
@ -3204,6 +3273,9 @@ multiclass VPatUnaryV_VF<string intrinsic, string instruction, string suffix,
|
|||
def : VPatUnaryNoMask<intrinsic, instruction, suffix,
|
||||
vti.Vector, fti.Vector,
|
||||
vti.Log2SEW, vti.LMul, fti.RegClass>;
|
||||
def : VPatUnaryNoMaskTU<intrinsic, instruction, suffix,
|
||||
vti.Vector, fti.Vector,
|
||||
vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
|
||||
def : VPatUnaryMaskTA<intrinsic, instruction, suffix,
|
||||
vti.Vector, fti.Vector, vti.Mask,
|
||||
vti.Log2SEW, vti.LMul, vti.RegClass, fti.RegClass>;
|
||||
|
@ -3216,6 +3288,9 @@ multiclass VPatUnaryV_V<string intrinsic, string instruction,
|
|||
def : VPatUnaryNoMask<intrinsic, instruction, "V",
|
||||
vti.Vector, vti.Vector,
|
||||
vti.Log2SEW, vti.LMul, vti.RegClass>;
|
||||
def : VPatUnaryNoMaskTU<intrinsic, instruction, "V",
|
||||
vti.Vector, vti.Vector,
|
||||
vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
|
||||
def : VPatUnaryMaskTA<intrinsic, instruction, "V",
|
||||
vti.Vector, vti.Vector, vti.Mask,
|
||||
vti.Log2SEW, vti.LMul, vti.RegClass, vti.RegClass>;
|
||||
|
@ -3226,9 +3301,15 @@ multiclass VPatNullaryV<string intrinsic, string instruction>
|
|||
{
|
||||
foreach vti = AllIntegerVectors in {
|
||||
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
|
||||
(vti.Vector undef),
|
||||
VLOpFrag)),
|
||||
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX)
|
||||
GPR:$vl, vti.Log2SEW)>;
|
||||
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic)
|
||||
(vti.Vector vti.RegClass:$merge),
|
||||
VLOpFrag)),
|
||||
(!cast<Instruction>(instruction#"_V_" # vti.LMul.MX # "_TU")
|
||||
vti.RegClass:$merge, GPR:$vl, vti.Log2SEW)>;
|
||||
def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic # "_mask")
|
||||
(vti.Vector vti.RegClass:$merge),
|
||||
(vti.Mask V0), VLOpFrag)),
|
||||
|
@ -3376,6 +3457,8 @@ multiclass VPatConversionTA<string intrinsic,
|
|||
{
|
||||
def : VPatUnaryNoMask<intrinsic, inst, kind, result_type, op1_type,
|
||||
sew, vlmul, op1_reg_class>;
|
||||
def : VPatUnaryNoMaskTU<intrinsic, inst, kind, result_type, op1_type,
|
||||
sew, vlmul, result_reg_class, op1_reg_class>;
|
||||
def : VPatUnaryMaskTA<intrinsic, inst, kind, result_type, op1_type,
|
||||
mask_type, sew, vlmul, result_reg_class, op1_reg_class>;
|
||||
}
|
||||
|
@ -3905,19 +3988,6 @@ multiclass VPatReductionW_VS<string intrinsic, string instruction, bit IsFloat =
|
|||
}
|
||||
}
|
||||
|
||||
multiclass VPatClassifyVI_VF<string intrinsic,
|
||||
string instruction>
|
||||
{
|
||||
foreach fvti = AllFloatVectors in
|
||||
{
|
||||
defvar ivti = GetIntVTypeInfo<fvti>.Vti;
|
||||
|
||||
defm : VPatConversion<intrinsic, instruction, "V",
|
||||
ivti.Vector, fvti.Vector, ivti.Mask, fvti.Log2SEW,
|
||||
fvti.LMul, ivti.RegClass, fvti.RegClass>;
|
||||
}
|
||||
}
|
||||
|
||||
multiclass VPatConversionVI_VF<string intrinsic,
|
||||
string instruction>
|
||||
{
|
||||
|
@ -5056,7 +5126,7 @@ defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>;
|
|||
//===----------------------------------------------------------------------===//
|
||||
// 14.14. Vector Floating-Point Classify Instruction
|
||||
//===----------------------------------------------------------------------===//
|
||||
defm : VPatClassifyVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
|
||||
defm : VPatConversionVI_VF<"int_riscv_vfclass", "PseudoVFCLASS">;
|
||||
|
||||
//===----------------------------------------------------------------------===//
|
||||
// 14.15. Vector Floating-Point Merge Instruction
@ -2203,3 +2203,784 @@ entry:
|
|||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i8>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vsext_vf8_nxv1i64:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e64, m1, tu, mu
|
||||
; RV32-NEXT: vsext.vf8 v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vsext_vf8_nxv1i64:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, mu
|
||||
; RV64-NEXT: vsext.vf8 v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64> %0,
|
||||
<vscale x 1 x i8> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i64> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i8>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vzext_vf8_nxv1i64:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e64, m1, tu, mu
|
||||
; RV32-NEXT: vzext.vf8 v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vzext_vf8_nxv1i64:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e64, m1, tu, mu
|
||||
; RV64-NEXT: vzext.vf8 v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64> %0,
|
||||
<vscale x 1 x i8> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i64> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 2 x i16> @intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32( <vscale x 2 x i16> %0, <vscale x 2 x float> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
|
||||
; RV32-NEXT: vfncvt.x.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv2i16_nxv2f32:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
|
||||
; RV64-NEXT: vfncvt.x.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vfncvt.x.f.w.nxv2i16.nxv2f32(
|
||||
<vscale x 2 x i16> %0,
|
||||
<vscale x 2 x float> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 2 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
|
||||
<vscale x 1 x i8>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(<vscale x 1 x i8> %0, iXLen %1) nounwind {
|
||||
; RV32-LABEL: intrinsic_vid_v_nxv1i8:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vid.v v8
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vid_v_nxv1i8:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vid.v v8
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
|
||||
<vscale x 1 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
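For contrast with the tail-undisturbed test above, this is what the same call looks like with an undef passthru (using the declaration of @llvm.riscv.vid.nxv1i8 shown earlier in this file and the file's iXLen placeholder). One would expect a tail-agnostic vsetvli here, but that expectation is not asserted by the tests in this hunk; the function below is illustrative and not part of the test file.

define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8_ta(iXLen %0) nounwind {
entry:
  ; undef passthru: nothing to merge into the tail
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
    <vscale x 1 x i8> undef,
    iXLen %0)
  ret <vscale x 1 x i8> %a
}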
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
|
||||
; RV32-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfclass.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfclass_v_nxv1i16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfclass.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i16>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfcvt.f.x.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfcvt.f.x.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x i16> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i16>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfcvt.f.xu.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfcvt.f.xu.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x i16> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfcvt.rtz.x.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfcvt.rtz.x.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfcvt.rtz.xu.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfcvt.rtz.xu.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfcvt.x.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfcvt.x.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfncvt.f.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfncvt.f.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x float> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfcvt.xu.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfcvt.xu.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i32>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfncvt.f.x.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfncvt.f.x.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x i32> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i32>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfncvt.f.xu.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfncvt.f.xu.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x i32> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfncvt.rod.f.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfncvt.rod.f.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x float> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vfncvt.rtz.x.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vfncvt.rtz.x.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vfncvt.rtz.xu.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vfncvt.rtz.xu.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vfncvt.x.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vfncvt.x.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vfncvt.xu.f.w v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vfncvt.xu.f.w v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
|
||||
<vscale x 1 x i8> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfrec7.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfrec7_v_nxv1f16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfrec7.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfrsqrt7.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfrsqrt7.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfsqrt.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfsqrt_v_nxv1f16_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfsqrt.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfwcvt.f.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfwcvt.f.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
|
||||
<vscale x 1 x float> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x float> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i8>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vfwcvt.f.x.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vfwcvt.f.x.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x i8> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i8>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: vfwcvt.f.xu.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: vfwcvt.f.xu.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
|
||||
<vscale x 1 x half> %0,
|
||||
<vscale x 1 x i8> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x half> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfwcvt.rtz.x.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfwcvt.rtz.x.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfwcvt.rtz.xu.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfwcvt.rtz.xu.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfwcvt.x.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfwcvt.x.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV32-NEXT: vfwcvt.xu.f.v v8, v9
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e16, mf4, tu, mu
|
||||
; RV64-NEXT: vfwcvt.xu.f.v v8, v9
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
|
||||
define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
|
||||
; RV32-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
|
||||
; RV32: # %bb.0: # %entry
|
||||
; RV32-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV32-NEXT: viota.m v8, v0
|
||||
; RV32-NEXT: ret
|
||||
;
|
||||
; RV64-LABEL: intrinsic_viota_m_nxv1i8_nxv1i1:
|
||||
; RV64: # %bb.0: # %entry
|
||||
; RV64-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
|
||||
; RV64-NEXT: viota.m v8, v0
|
||||
; RV64-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
|
||||
<vscale x 1 x i8> %0,
|
||||
<vscale x 1 x i1> %1,
|
||||
iXLen %2)
|
||||
|
||||
ret <vscale x 1 x i8> %a
|
||||
}
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -17,6 +18,7 @@ define <vscale x 1 x i16> @intrinsic_vfclass_v_nxv1i16_nxv1f16(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfclass.nxv1i16(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -27,7 +29,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
|
|||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
|
||||
|
@ -44,12 +46,13 @@ entry:
|
|||
<vscale x 1 x i16> %0,
|
||||
<vscale x 1 x half> %1,
|
||||
<vscale x 1 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 1 x i16> %a
|
||||
}
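The two iXLen operands in the masked vfclass declaration above are vl and a trailing policy immediate, matching the "(maskedoff, vector_in, mask, vl, policy)" comment and the ImmArg on operand 4 earlier in this diff. The sketch below only labels the operand roles of such a call, using the iXLen placeholder this file already relies on; the function name is invented for illustration.

define <vscale x 1 x i16> @vfclass_mask_operand_roles(
    <vscale x 1 x i16> %maskedoff, <vscale x 1 x half> %src,
    <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(
    <vscale x 1 x i16> %maskedoff,  ; merge source for masked-off and tail elements
    <vscale x 1 x half> %src,       ; input vector
    <vscale x 1 x i1> %mask,        ; mask register
    iXLen %vl,                      ; vl
    iXLen 0)                        ; new trailing policy immediate (these tests pass 0)
  ret <vscale x 1 x i16> %a
}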
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -63,6 +66,7 @@ define <vscale x 2 x i16> @intrinsic_vfclass_v_nxv2i16_nxv2f16(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vfclass.nxv2i16(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -73,7 +77,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vfclass.mask.nxv2i16(
|
|||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 2 x i16> @intrinsic_vfclass_mask_v_nxv2i16_nxv2f16(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i16_nxv2f16:
|
||||
|
@ -90,12 +94,13 @@ entry:
|
|||
<vscale x 2 x i16> %0,
|
||||
<vscale x 2 x half> %1,
|
||||
<vscale x 2 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 2 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -109,6 +114,7 @@ define <vscale x 4 x i16> @intrinsic_vfclass_v_nxv4i16_nxv4f16(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vfclass.nxv4i16(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -119,7 +125,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vfclass.mask.nxv4i16(
|
|||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 4 x i16> @intrinsic_vfclass_mask_v_nxv4i16_nxv4f16(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i16_nxv4f16:
|
||||
|
@ -136,12 +142,13 @@ entry:
|
|||
<vscale x 4 x i16> %0,
|
||||
<vscale x 4 x half> %1,
|
||||
<vscale x 4 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 4 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -155,6 +162,7 @@ define <vscale x 8 x i16> @intrinsic_vfclass_v_nxv8i16_nxv8f16(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vfclass.nxv8i16(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -165,7 +173,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vfclass.mask.nxv8i16(
|
|||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 8 x i16> @intrinsic_vfclass_mask_v_nxv8i16_nxv8f16(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i16_nxv8f16:
|
||||
|
@ -182,12 +190,13 @@ entry:
|
|||
<vscale x 8 x i16> %0,
|
||||
<vscale x 8 x half> %1,
|
||||
<vscale x 8 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 8 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -201,6 +210,7 @@ define <vscale x 16 x i16> @intrinsic_vfclass_v_nxv16i16_nxv16f16(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vfclass.nxv16i16(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -211,7 +221,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vfclass.mask.nxv16i16(
|
|||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 16 x i16> @intrinsic_vfclass_mask_v_nxv16i16_nxv16f16(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i16_nxv16f16:
|
||||
|
@ -228,12 +238,13 @@ entry:
|
|||
<vscale x 16 x i16> %0,
|
||||
<vscale x 16 x half> %1,
|
||||
<vscale x 16 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 16 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -247,6 +258,7 @@ define <vscale x 32 x i16> @intrinsic_vfclass_v_nxv32i16_nxv32f16(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vfclass.nxv32i16(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -257,7 +269,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vfclass.mask.nxv32i16(
|
|||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 32 x i16> @intrinsic_vfclass_mask_v_nxv32i16_nxv32f16(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv32i16_nxv32f16:
|
||||
|
@ -274,12 +286,13 @@ entry:
|
|||
<vscale x 32 x i16> %0,
|
||||
<vscale x 32 x half> %1,
|
||||
<vscale x 32 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 32 x i16> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -293,6 +306,7 @@ define <vscale x 1 x i32> @intrinsic_vfclass_v_nxv1i32_nxv1f32(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfclass.nxv1i32(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -303,7 +317,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vfclass.mask.nxv1i32(
|
|||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 1 x i32> @intrinsic_vfclass_mask_v_nxv1i32_nxv1f32(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i32_nxv1f32:
|
||||
|
@ -320,12 +334,13 @@ entry:
|
|||
<vscale x 1 x i32> %0,
|
||||
<vscale x 1 x float> %1,
|
||||
<vscale x 1 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 1 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -339,6 +354,7 @@ define <vscale x 2 x i32> @intrinsic_vfclass_v_nxv2i32_nxv2f32(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfclass.nxv2i32(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -349,7 +365,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vfclass.mask.nxv2i32(
|
|||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 2 x i32> @intrinsic_vfclass_mask_v_nxv2i32_nxv2f32(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i32_nxv2f32:
|
||||
|
@ -366,12 +382,13 @@ entry:
|
|||
<vscale x 2 x i32> %0,
|
||||
<vscale x 2 x float> %1,
|
||||
<vscale x 2 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 2 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -385,6 +402,7 @@ define <vscale x 4 x i32> @intrinsic_vfclass_v_nxv4i32_nxv4f32(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfclass.nxv4i32(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -395,7 +413,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vfclass.mask.nxv4i32(
|
|||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 4 x i32> @intrinsic_vfclass_mask_v_nxv4i32_nxv4f32(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i32_nxv4f32:
|
||||
|
@ -412,12 +430,13 @@ entry:
|
|||
<vscale x 4 x i32> %0,
|
||||
<vscale x 4 x float> %1,
|
||||
<vscale x 4 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 4 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -431,6 +450,7 @@ define <vscale x 8 x i32> @intrinsic_vfclass_v_nxv8i32_nxv8f32(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfclass.nxv8i32(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -441,7 +461,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vfclass.mask.nxv8i32(
|
|||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 8 x i32> @intrinsic_vfclass_mask_v_nxv8i32_nxv8f32(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i32_nxv8f32:
|
||||
|
@ -458,12 +478,13 @@ entry:
|
|||
<vscale x 8 x i32> %0,
|
||||
<vscale x 8 x float> %1,
|
||||
<vscale x 8 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 8 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -477,6 +498,7 @@ define <vscale x 16 x i32> @intrinsic_vfclass_v_nxv16i32_nxv16f32(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfclass.nxv16i32(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -487,7 +509,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vfclass.mask.nxv16i32(
|
|||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 16 x i32> @intrinsic_vfclass_mask_v_nxv16i32_nxv16f32(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv16i32_nxv16f32:
|
||||
|
@ -504,12 +526,13 @@ entry:
|
|||
<vscale x 16 x i32> %0,
|
||||
<vscale x 16 x float> %1,
|
||||
<vscale x 16 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 16 x i32> %a
|
||||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -523,6 +546,7 @@ define <vscale x 1 x i64> @intrinsic_vfclass_v_nxv1i64_nxv1f64(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfclass.nxv1i64(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -533,7 +557,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vfclass.mask.nxv1i64(
|
|||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 1 x i64> @intrinsic_vfclass_mask_v_nxv1i64_nxv1f64(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i64_nxv1f64:
|
||||
|
@ -550,12 +574,13 @@ entry:
|
|||
<vscale x 1 x i64> %0,
|
||||
<vscale x 1 x double> %1,
|
||||
<vscale x 1 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 1 x i64> %a
|
||||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -569,6 +594,7 @@ define <vscale x 2 x i64> @intrinsic_vfclass_v_nxv2i64_nxv2f64(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfclass.nxv2i64(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -579,7 +605,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vfclass.mask.nxv2i64(
|
|||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 2 x i64> @intrinsic_vfclass_mask_v_nxv2i64_nxv2f64(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv2i64_nxv2f64:
|
||||
|
@ -596,12 +622,13 @@ entry:
|
|||
<vscale x 2 x i64> %0,
|
||||
<vscale x 2 x double> %1,
|
||||
<vscale x 2 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 2 x i64> %a
|
||||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -615,6 +642,7 @@ define <vscale x 4 x i64> @intrinsic_vfclass_v_nxv4i64_nxv4f64(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfclass.nxv4i64(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -625,7 +653,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vfclass.mask.nxv4i64(
|
|||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 4 x i64> @intrinsic_vfclass_mask_v_nxv4i64_nxv4f64(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv4i64_nxv4f64:
|
||||
|
@ -642,12 +670,13 @@ entry:
|
|||
<vscale x 4 x i64> %0,
|
||||
<vscale x 4 x double> %1,
|
||||
<vscale x 4 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 4 x i64> %a
|
||||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -661,6 +690,7 @@ define <vscale x 8 x i64> @intrinsic_vfclass_v_nxv8i64_nxv8f64(
|
|||
iXLen %1) nounwind {
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfclass.nxv8i64(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -671,7 +701,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vfclass.mask.nxv8i64(
|
|||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
iXLen, iXLen);
|
||||
|
||||
define <vscale x 8 x i64> @intrinsic_vfclass_mask_v_nxv8i64_nxv8f64(
|
||||
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv8i64_nxv8f64:
|
||||
|
@ -688,7 +718,7 @@ entry:
|
|||
<vscale x 8 x i64> %0,
|
||||
<vscale x 8 x double> %1,
|
||||
<vscale x 8 x i1> %2,
|
||||
iXLen %3)
|
||||
iXLen %3, iXLen 0)
|
||||
|
||||
ret <vscale x 8 x i64> %a
|
||||
}
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.x.v_nxv2f16_nxv2i16(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.x.v.nxv2f16.nxv2i16(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.x.v_nxv4f16_nxv4i16(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.x.v.nxv4f16.nxv4i16(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.x.v_nxv8f16_nxv8i16(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.x.v.nxv8f16.nxv8i16(
|
||||
<vscale x 8 x half> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.x.v_nxv16f16_nxv16i16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.x.v.nxv16f16.nxv16i16(
|
||||
<vscale x 16 x half> undef,
|
||||
<vscale x 16 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.x.v_nxv32f16_nxv32i16(<vscale x 3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.x.v.nxv32f16.nxv32i16(
|
||||
<vscale x 32 x half> undef,
|
||||
<vscale x 32 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.x.v_nxv1f32_nxv1i32(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.x.v.nxv1f32.nxv1i32(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.x.v_nxv2f32_nxv2i32(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.x.v.nxv2f32.nxv2i32(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.x.v_nxv4f32_nxv4i32(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.x.v.nxv4f32.nxv4i32(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.x.v_nxv8f32_nxv8i32(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.x.v.nxv8f32.nxv8i32(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.x.v_nxv16f32_nxv16i32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.x.v.nxv16f32.nxv16i32(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.x.v_nxv1f64_nxv1i64(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.x.v.nxv1f64.nxv1i64(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.x.v_nxv2f64_nxv2i64(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.x.v.nxv2f64.nxv2i64(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.x.v_nxv4f64_nxv4i64(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.x.v.nxv4f64.nxv4i64(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.x.v_nxv8f64_nxv8i64(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.x.v.nxv8f64.nxv8i64(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfcvt_f.xu.v_nxv1f16_nxv1i16(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.xu.v.nxv1f16.nxv1i16(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfcvt_f.xu.v_nxv2f16_nxv2i16(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfcvt.f.xu.v.nxv2f16.nxv2i16(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfcvt_f.xu.v_nxv4f16_nxv4i16(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfcvt.f.xu.v.nxv4f16.nxv4i16(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x half> @intrinsic_vfcvt_f.xu.v_nxv8f16_nxv8i16(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfcvt.f.xu.v.nxv8f16.nxv8i16(
|
||||
<vscale x 8 x half> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfcvt_f.xu.v_nxv16f16_nxv16i16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x half> @llvm.riscv.vfcvt.f.xu.v.nxv16f16.nxv16i16(
|
||||
<vscale x 16 x half> undef,
|
||||
<vscale x 16 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfcvt_f.xu.v_nxv32f16_nxv32i16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x half> @llvm.riscv.vfcvt.f.xu.v.nxv32f16.nxv32i16(
|
||||
<vscale x 32 x half> undef,
|
||||
<vscale x 32 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x float> @intrinsic_vfcvt_f.xu.v_nxv1f32_nxv1i32(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfcvt.f.xu.v.nxv1f32.nxv1i32(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x float> @intrinsic_vfcvt_f.xu.v_nxv2f32_nxv2i32(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfcvt.f.xu.v.nxv2f32.nxv2i32(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x float> @intrinsic_vfcvt_f.xu.v_nxv4f32_nxv4i32(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfcvt.f.xu.v.nxv4f32.nxv4i32(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x float> @intrinsic_vfcvt_f.xu.v_nxv8f32_nxv8i32(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfcvt.f.xu.v.nxv8f32.nxv8i32(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x float> @intrinsic_vfcvt_f.xu.v_nxv16f32_nxv16i32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfcvt.f.xu.v.nxv16f32.nxv16i32(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x double> @intrinsic_vfcvt_f.xu.v_nxv1f64_nxv1i64(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfcvt.f.xu.v.nxv1f64.nxv1i64(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x double> @intrinsic_vfcvt_f.xu.v_nxv2f64_nxv2i64(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfcvt.f.xu.v.nxv2f64.nxv2i64(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x double> @intrinsic_vfcvt_f.xu.v_nxv4f64_nxv4i64(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfcvt.f.xu.v.nxv4f64.nxv4i64(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x i64>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x double> @intrinsic_vfcvt_f.xu.v_nxv8f64_nxv8i64(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfcvt.f.xu.v.nxv8f64.nxv8i64(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x i64> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv1i16_nxv1f16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv2i16_nxv2f16(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv4i16_nxv4f16(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv8i16_nxv8f16(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv16i16_nxv16f16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.x.f.v_nxv32i16_nxv32f16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.x.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv1i32_nxv1f32(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv2i32_nxv2f32(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv4i32_nxv4f32(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i32.nxv4f32(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv8i32_nxv8f32(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i32.nxv8f32(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.x.f.v_nxv16i32_nxv16f32(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.x.f.v.nxv16i32.nxv16f32(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv1i64_nxv1f64(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv1i64.nxv1f64(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv2i64_nxv2f64(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv2i64.nxv2f64(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv4i64_nxv4f64(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv4i64.nxv4f64(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.x.f.v_nxv8i64_nxv8f64(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.x.f.v.nxv8i64.nxv8f64(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i16_nxv1f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i16_nxv2f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i16_nxv4f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i16_nxv8f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i16_nxv16f16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_rtz.xu.f.v_nxv32i16_nxv32f16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i32_nxv1f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i32_nxv2f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i32_nxv4f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i32.nxv4f32(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i32_nxv8f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i32.nxv8f32(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_rtz.xu.f.v_nxv16i32_nxv16f32(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv16i32.nxv16f32(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv1i64_nxv1f64(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv1i64.nxv1f64(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv2i64_nxv2f64(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv2i64.nxv2f64(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv4i64_nxv4f64(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv4i64.nxv4f64(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_rtz.xu.f.v_nxv8i64_nxv8f64(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.rtz.xu.f.v.nxv8i64.nxv8f64(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_x.f.v_nxv1i16_nxv1f16(<vscale x 1 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.x.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_x.f.v_nxv2i16_nxv2f16(<vscale x 2 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.x.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_x.f.v_nxv4i16_nxv4f16(<vscale x 4 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.x.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_x.f.v_nxv8i16_nxv8f16(<vscale x 8 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.x.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_x.f.v_nxv16i16_nxv16f16(<vscale x 16
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.x.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_x.f.v_nxv32i16_nxv32f16(<vscale x 32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.x.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_x.f.v_nxv1i32_nxv1f32(<vscale x 1 x f
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.x.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_x.f.v_nxv2i32_nxv2f32(<vscale x 2 x f
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.x.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_x.f.v_nxv4i32_nxv4f32(<vscale x 4 x f
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.x.f.v.nxv4i32.nxv4f32(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x i32> @intrinsic_vfcvt_x.f.v_nxv8i32_nxv8f32(<vscale x 8 x f
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfcvt.x.f.v.nxv8i32.nxv8f32(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x i32> @intrinsic_vfcvt_x.f.v_nxv16i32_nxv16f32(<vscale x 16
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfcvt.x.f.v.nxv16i32.nxv16f32(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x i64> @intrinsic_vfcvt_x.f.v_nxv1i64_nxv1f64(<vscale x 1 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfcvt.x.f.v.nxv1i64.nxv1f64(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x i64> @intrinsic_vfcvt_x.f.v_nxv2i64_nxv2f64(<vscale x 2 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfcvt.x.f.v.nxv2i64.nxv2f64(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x i64> @intrinsic_vfcvt_x.f.v_nxv4i64_nxv4f64(<vscale x 4 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfcvt.x.f.v.nxv4i64.nxv4f64(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x i64> @intrinsic_vfcvt_x.f.v_nxv8i64_nxv8f64(<vscale x 8 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfcvt.x.f.v.nxv8i64.nxv8f64(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x i16> @intrinsic_vfcvt_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv1i16.nxv1f16(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i16> @intrinsic_vfcvt_xu.f.v_nxv2i16_nxv2f16(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv2i16.nxv2f16(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x i16> @intrinsic_vfcvt_xu.f.v_nxv4i16_nxv4f16(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv4i16.nxv4f16(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x i16> @intrinsic_vfcvt_xu.f.v_nxv8i16_nxv8f16(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv8i16.nxv8f16(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x i16> @intrinsic_vfcvt_xu.f.v_nxv16i16_nxv16f16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv16i16.nxv16f16(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x i16> @intrinsic_vfcvt_xu.f.v_nxv32i16_nxv32f16(<vscale x 3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vfcvt.xu.f.v.nxv32i16.nxv32f16(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x i32> @intrinsic_vfcvt_xu.f.v_nxv1i32_nxv1f32(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv1i32.nxv1f32(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x i32> @intrinsic_vfcvt_xu.f.v_nxv2i32_nxv2f32(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
  %a = call <vscale x 2 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv2i32.nxv2f32(
    <vscale x 2 x i32> undef,
    <vscale x 2 x float> %0,
    iXLen %1)

@@ -332,6 +348,7 @@ entry:
}

declare <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
  <vscale x 4 x i32>,
  <vscale x 4 x float>,
  iXLen);

@@ -343,6 +360,7 @@ define <vscale x 4 x i32> @intrinsic_vfcvt_xu.f.v_nxv4i32_nxv4f32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vfcvt.xu.f.v.nxv4i32.nxv4f32(
    <vscale x 4 x i32> undef,
    <vscale x 4 x float> %0,
    iXLen %1)

[... identical hunks follow for @llvm.riscv.vfcvt.xu.f.v with the element types nxv8i32.nxv8f32, nxv16i32.nxv16f32, nxv1i64.nxv1f64, nxv2i64.nxv2f64, nxv4i64.nxv4f64, and nxv8i64.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.nxv1f16.nxv1f32(
    <vscale x 1 x half> undef,
    <vscale x 1 x float> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.f.f.w with the element types nxv2f16.nxv2f32, nxv4f16.nxv4f32, nxv8f16.nxv8f32, nxv16f16.nxv16f32, nxv1f32.nxv1f64, nxv2f32.nxv2f64, nxv4f32.nxv4f64, and nxv8f32.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x i32> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.f.x.w with the element types nxv2f16.nxv2i32, nxv4f16.nxv4i32, nxv8f16.nxv8i32, nxv16f16.nxv16i32, nxv1f32.nxv1i64, nxv2f32.nxv2i64, nxv4f32.nxv4i64, and nxv8f32.nxv8i64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
  <vscale x 1 x half>,
  <vscale x 1 x i32>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_f.xu.w_nxv1f16_nxv1i32(<vscale x 1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.xu.w.nxv1f16.nxv1i32(
    <vscale x 1 x half> undef,
    <vscale x 1 x i32> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.f.xu.w with the element types nxv2f16.nxv2i32, nxv4f16.nxv4i32, nxv8f16.nxv8i32, nxv16f16.nxv16i32, nxv1f32.nxv1i64, nxv2f32.nxv2i64, nxv4f32.nxv4i64, and nxv8f32.nxv8i64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
  <vscale x 1 x half>,
  <vscale x 1 x float>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfncvt_rod.f.f.w_nxv1f16_nxv1f32(<vscale x
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.rod.f.f.w.nxv1f16.nxv1f32(
    <vscale x 1 x half> undef,
    <vscale x 1 x float> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.rod.f.f.w with the element types nxv2f16.nxv2f32, nxv4f16.nxv4f32, nxv8f16.nxv8f32, nxv16f16.nxv16f32, nxv1f32.nxv1f64, nxv2f32.nxv2f64, nxv4f32.nxv4f64, and nxv8f32.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.x.f.w_nxv1i8_nxv1f16(<vscale x 1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.x.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.rtz.x.f.w with the element types nxv2i8.nxv2f16, nxv4i8.nxv4f16, nxv8i8.nxv8f16, nxv16i8.nxv16f16, nxv32i8.nxv32f16, nxv1i16.nxv1f32, nxv2i16.nxv2f32, nxv4i16.nxv4f32, nxv8i16.nxv8f32, nxv16i16.nxv16f32, nxv1i32.nxv1f64, nxv2i32.nxv2f64, nxv4i32.nxv4f64, and nxv8i32.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_rtz.xu.f.w_nxv1i8_nxv1f16(<vscale x 1
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.rtz.xu.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.rtz.xu.f.w with the element types nxv2i8.nxv2f16, nxv4i8.nxv4f16, nxv8i8.nxv8f16, nxv16i8.nxv16f16, nxv32i8.nxv32f16, nxv1i16.nxv1f32, nxv2i16.nxv2f32, nxv4i16.nxv4f32, nxv8i16.nxv8f32, nxv16i16.nxv16f32, nxv1i32.nxv1f64, nxv2i32.nxv2f64, nxv4i32.nxv4f64, and nxv8i32.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_x.f.w_nxv1i8_nxv1f16(<vscale x 1 x ha
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.x.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.x.f.w with the element types nxv2i8.nxv2f16, nxv4i8.nxv4f16, nxv8i8.nxv8f16, nxv16i8.nxv16f16, nxv32i8.nxv32f16, nxv1i16.nxv1f32, nxv2i16.nxv2f32, nxv4i16.nxv4f32, nxv8i16.nxv8f32, nxv16i16.nxv16f32, nxv1i32.nxv1f64, nxv2i32.nxv2f64, nxv4i32.nxv4f64, and nxv8i32.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
  <vscale x 1 x i8>,
  <vscale x 1 x half>,
  iXLen);

@@ -16,6 +17,7 @@ define <vscale x 1 x i8> @intrinsic_vfncvt_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x h
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.nxv1i8.nxv1f16(
    <vscale x 1 x i8> undef,
    <vscale x 1 x half> %0,
    iXLen %1)

@@ -46,6 +48,7 @@ entry:
}

[... identical hunks follow for @llvm.riscv.vfncvt.xu.f.w with the element types nxv2i8.nxv2f16, nxv4i8.nxv4f16, nxv8i8.nxv8f16, nxv16i8.nxv16f16, nxv32i8.nxv32f16, nxv1i16.nxv1f32, nxv2i16.nxv2f32, nxv4i16.nxv4f32, nxv8i16.nxv8f32, nxv16i16.nxv16f32, nxv1i32.nxv1f64, nxv2i32.nxv2f64, nxv4i32.nxv4f64, and nxv8i32.nxv8f64; each declaration gains a leading passthru parameter of the result type and each call passes undef for it ...]

@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
|
||||
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s
|
||||
declare <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
|
||||
<vscale x 1 x half>,
|
||||
<vscale x 1 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfrec7_v_nxv1f16_nxv1f16(<vscale x 1 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfrec7.nxv1f16(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfrec7_v_nxv2f16_nxv2f16(<vscale x 2 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfrec7.nxv2f16(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfrec7_v_nxv4f16_nxv4f16(<vscale x 4 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfrec7.nxv4f16(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x half> @intrinsic_vfrec7_v_nxv8f16_nxv8f16(<vscale x 8 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfrec7.nxv8f16(
|
||||
    <vscale x 8 x half> undef,
    <vscale x 8 x half> %0,
    iXLen %1)

@ -168,6 +176,7 @@ entry:
}

declare <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  iXLen);

@ -179,6 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfrec7_v_nxv16f16_nxv16f16(<vscale x 16 x
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfrec7.nxv16f16(
    <vscale x 16 x half> undef,
    <vscale x 16 x half> %0,
    iXLen %1)

@ -209,6 +219,7 @@ entry:
}

declare <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  iXLen);

@ -220,6 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfrec7_v_nxv32f16_nxv32f16(<vscale x 32 x
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfrec7.nxv32f16(
    <vscale x 32 x half> undef,
    <vscale x 32 x half> %0,
    iXLen %1)

@ -250,6 +262,7 @@ entry:
}

declare <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  iXLen);

@ -261,6 +274,7 @@ define <vscale x 1 x float> @intrinsic_vfrec7_v_nxv1f32_nxv1f32(<vscale x 1 x fl
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %0,
    iXLen %1)

@ -291,6 +305,7 @@ entry:
}

declare <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  iXLen);

@ -302,6 +317,7 @@ define <vscale x 2 x float> @intrinsic_vfrec7_v_nxv2f32_nxv2f32(<vscale x 2 x fl
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfrec7.nxv2f32(
    <vscale x 2 x float> undef,
    <vscale x 2 x float> %0,
    iXLen %1)

@ -332,6 +348,7 @@ entry:
}

declare <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  iXLen);

@ -343,6 +360,7 @@ define <vscale x 4 x float> @intrinsic_vfrec7_v_nxv4f32_nxv4f32(<vscale x 4 x fl
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfrec7.nxv4f32(
    <vscale x 4 x float> undef,
    <vscale x 4 x float> %0,
    iXLen %1)

@ -373,6 +391,7 @@ entry:
}

declare <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  iXLen);

@ -384,6 +403,7 @@ define <vscale x 8 x float> @intrinsic_vfrec7_v_nxv8f32_nxv8f32(<vscale x 8 x fl
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfrec7.nxv8f32(
    <vscale x 8 x float> undef,
    <vscale x 8 x float> %0,
    iXLen %1)

@ -414,6 +434,7 @@ entry:
}

declare <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  iXLen);

@ -425,6 +446,7 @@ define <vscale x 16 x float> @intrinsic_vfrec7_v_nxv16f32_nxv16f32(<vscale x 16
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfrec7.nxv16f32(
    <vscale x 16 x float> undef,
    <vscale x 16 x float> %0,
    iXLen %1)

@ -455,6 +477,7 @@ entry:
}

declare <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  iXLen);

@ -466,6 +489,7 @@ define <vscale x 1 x double> @intrinsic_vfrec7_v_nxv1f64_nxv1f64(<vscale x 1 x d
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfrec7.nxv1f64(
    <vscale x 1 x double> undef,
    <vscale x 1 x double> %0,
    iXLen %1)

@ -496,6 +520,7 @@ entry:
}

declare <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  iXLen);

@ -507,6 +532,7 @@ define <vscale x 2 x double> @intrinsic_vfrec7_v_nxv2f64_nxv2f64(<vscale x 2 x d
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfrec7.nxv2f64(
    <vscale x 2 x double> undef,
    <vscale x 2 x double> %0,
    iXLen %1)

@ -537,6 +563,7 @@ entry:
}

declare <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  iXLen);

@ -548,6 +575,7 @@ define <vscale x 4 x double> @intrinsic_vfrec7_v_nxv4f64_nxv4f64(<vscale x 4 x d
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfrec7.nxv4f64(
    <vscale x 4 x double> undef,
    <vscale x 4 x double> %0,
    iXLen %1)

@ -578,6 +606,7 @@ entry:
}

declare <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  iXLen);

@ -589,6 +618,7 @@ define <vscale x 8 x double> @intrinsic_vfrec7_v_nxv8f64_nxv8f64(<vscale x 8 x d
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfrec7.nxv8f64(
    <vscale x 8 x double> undef,
    <vscale x 8 x double> %0,
    iXLen %1)
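Editorial note, not part of the diff: every updated test block above and below follows the same shape, so a single standalone sketch may help when reading the rest. It mirrors the declare/call pattern shown in the vfrec7 hunks (a leading passthru vector operand, passed as undef in these nomask tests), with iXLen written as i64 the way the RUN lines' sed substitution does; the names @sample_vfrec7, %src, %vl and %r are illustrative only and do not appear in the tests.

; Sketch only -- mirrors the declare/call shape in the updated tests above.
declare <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
  <vscale x 1 x float>,   ; passthru operand added by this change (nomask tests pass undef)
  <vscale x 1 x float>,   ; source vector
  i64)                    ; vector length (iXLen in the tests, i64 here)

define <vscale x 1 x float> @sample_vfrec7(<vscale x 1 x float> %src, i64 %vl) {
entry:
  %r = call <vscale x 1 x float> @llvm.riscv.vfrec7.nxv1f32(
    <vscale x 1 x float> undef,
    <vscale x 1 x float> %src,
    i64 %vl)
  ret <vscale x 1 x float> %r
}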
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfrsqrt7_v_nxv1f16_nxv1f16(<vscale x 1 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.nxv1f16(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfrsqrt7_v_nxv2f16_nxv2f16(<vscale x 2 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfrsqrt7.nxv2f16(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfrsqrt7_v_nxv4f16_nxv4f16(<vscale x 4 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfrsqrt7.nxv4f16(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x half> @intrinsic_vfrsqrt7_v_nxv8f16_nxv8f16(<vscale x 8 x h
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfrsqrt7.nxv8f16(
|
||||
<vscale x 8 x half> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfrsqrt7_v_nxv16f16_nxv16f16(<vscale x 16
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x half> @llvm.riscv.vfrsqrt7.nxv16f16(
|
||||
<vscale x 16 x half> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfrsqrt7_v_nxv32f16_nxv32f16(<vscale x 32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x half> @llvm.riscv.vfrsqrt7.nxv32f16(
|
||||
<vscale x 32 x half> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x float> @intrinsic_vfrsqrt7_v_nxv1f32_nxv1f32(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfrsqrt7.nxv1f32(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x float> @intrinsic_vfrsqrt7_v_nxv2f32_nxv2f32(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfrsqrt7.nxv2f32(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x float> @intrinsic_vfrsqrt7_v_nxv4f32_nxv4f32(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfrsqrt7.nxv4f32(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x float> @intrinsic_vfrsqrt7_v_nxv8f32_nxv8f32(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfrsqrt7.nxv8f32(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x float> @intrinsic_vfrsqrt7_v_nxv16f32_nxv16f32(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfrsqrt7.nxv16f32(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x double> @intrinsic_vfrsqrt7_v_nxv1f64_nxv1f64(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfrsqrt7.nxv1f64(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x double> @intrinsic_vfrsqrt7_v_nxv2f64_nxv2f64(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfrsqrt7.nxv2f64(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x double> @intrinsic_vfrsqrt7_v_nxv4f64_nxv4f64(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfrsqrt7.nxv4f64(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x double> @intrinsic_vfrsqrt7_v_nxv8f64_nxv8f64(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfrsqrt7.nxv8f64(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x half> @intrinsic_vfsqrt_v_nxv1f16_nxv1f16(<vscale x 1 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.nxv1f16(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -45,6 +47,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x half> @intrinsic_vfsqrt_v_nxv2f16_nxv2f16(<vscale x 2 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfsqrt.nxv2f16(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -97,6 +102,7 @@ define <vscale x 4 x half> @intrinsic_vfsqrt_v_nxv4f16_nxv4f16(<vscale x 4 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfsqrt.nxv4f16(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -127,6 +133,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -138,6 +145,7 @@ define <vscale x 8 x half> @intrinsic_vfsqrt_v_nxv8f16_nxv8f16(<vscale x 8 x hal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfsqrt.nxv8f16(
|
||||
<vscale x 8 x half> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -168,6 +176,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -179,6 +188,7 @@ define <vscale x 16 x half> @intrinsic_vfsqrt_v_nxv16f16_nxv16f16(<vscale x 16 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x half> @llvm.riscv.vfsqrt.nxv16f16(
|
||||
<vscale x 16 x half> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -209,6 +219,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -220,6 +231,7 @@ define <vscale x 32 x half> @intrinsic_vfsqrt_v_nxv32f16_nxv32f16(<vscale x 32 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x half> @llvm.riscv.vfsqrt.nxv32f16(
|
||||
<vscale x 32 x half> undef,
|
||||
<vscale x 32 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -250,6 +262,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -261,6 +274,7 @@ define <vscale x 1 x float> @intrinsic_vfsqrt_v_nxv1f32_nxv1f32(<vscale x 1 x fl
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfsqrt.nxv1f32(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -291,6 +305,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -302,6 +317,7 @@ define <vscale x 2 x float> @intrinsic_vfsqrt_v_nxv2f32_nxv2f32(<vscale x 2 x fl
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfsqrt.nxv2f32(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -332,6 +348,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -343,6 +360,7 @@ define <vscale x 4 x float> @intrinsic_vfsqrt_v_nxv4f32_nxv4f32(<vscale x 4 x fl
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfsqrt.nxv4f32(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -373,6 +391,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -384,6 +403,7 @@ define <vscale x 8 x float> @intrinsic_vfsqrt_v_nxv8f32_nxv8f32(<vscale x 8 x fl
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfsqrt.nxv8f32(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -414,6 +434,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -425,6 +446,7 @@ define <vscale x 16 x float> @intrinsic_vfsqrt_v_nxv16f32_nxv16f32(<vscale x 16
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfsqrt.nxv16f32(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -455,6 +477,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -466,6 +489,7 @@ define <vscale x 1 x double> @intrinsic_vfsqrt_v_nxv1f64_nxv1f64(<vscale x 1 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfsqrt.nxv1f64(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -496,6 +520,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -507,6 +532,7 @@ define <vscale x 2 x double> @intrinsic_vfsqrt_v_nxv2f64_nxv2f64(<vscale x 2 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfsqrt.nxv2f64(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -537,6 +563,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -548,6 +575,7 @@ define <vscale x 4 x double> @intrinsic_vfsqrt_v_nxv4f64_nxv4f64(<vscale x 4 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfsqrt.nxv4f64(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -578,6 +606,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x double>,
|
||||
iXLen);
|
||||
|
||||
|
@ -589,6 +618,7 @@ define <vscale x 8 x double> @intrinsic_vfsqrt_v_nxv8f64_nxv8f64(<vscale x 8 x d
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfsqrt.nxv8f64(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x double> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
  <vscale x 1 x float>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.f.v_nxv1f32_nxv1f16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.nxv1f32.nxv1f16(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.f.v_nxv2f32_nxv2f16(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.f.v.nxv2f32.nxv2f16(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -88,6 +92,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -100,6 +105,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.f.v_nxv4f32_nxv4f16(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.f.v.nxv4f32.nxv4f16(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -130,6 +136,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -142,6 +149,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.f.v_nxv8f32_nxv8f16(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.f.v.nxv8f32.nxv8f16(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -172,6 +180,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -184,6 +193,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.f.v_nxv16f32_nxv16f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.f.v.nxv16f32.nxv16f16(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -214,6 +224,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -226,6 +237,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.f.v_nxv1f64_nxv1f32(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.f.v.nxv1f64.nxv1f32(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -256,6 +268,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -268,6 +281,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.f.v_nxv2f64_nxv2f32(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.f.v.nxv2f64.nxv2f32(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -298,6 +312,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -310,6 +325,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.f.v_nxv4f64_nxv4f32(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.f.v.nxv4f64.nxv4f32(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -340,6 +356,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -352,6 +369,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.f.v_nxv8f64_nxv8f32(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.f.v.nxv8f64.nxv8f32(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.nxv1f16.nxv1i8(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.x.v_nxv2f16_nxv2i8(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.x.v.nxv2f16.nxv2i8(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -88,6 +92,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -100,6 +105,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.x.v_nxv4f16_nxv4i8(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.x.v.nxv4f16.nxv4i8(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -130,6 +136,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -142,6 +149,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.x.v_nxv8f16_nxv8i8(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.x.v.nxv8f16.nxv8i8(
|
||||
<vscale x 8 x half> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -172,6 +180,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -184,6 +193,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.x.v_nxv16f16_nxv16i8(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.x.v.nxv16f16.nxv16i8(
|
||||
<vscale x 16 x half> undef,
|
||||
<vscale x 16 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -214,6 +224,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -226,6 +237,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.x.v_nxv32f16_nxv32i8(<vscale x 3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.x.v.nxv32f16.nxv32i8(
|
||||
<vscale x 32 x half> undef,
|
||||
<vscale x 32 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -256,6 +268,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -268,6 +281,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.x.v_nxv1f32_nxv1i16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.x.v.nxv1f32.nxv1i16(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -298,6 +312,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -310,6 +325,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.x.v_nxv2f32_nxv2i16(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.x.v.nxv2f32.nxv2i16(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -340,6 +356,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -352,6 +369,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.x.v_nxv4f32_nxv4i16(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.x.v.nxv4f32.nxv4i16(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -382,6 +400,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -394,6 +413,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.x.v_nxv8f32_nxv8i16(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.x.v.nxv8f32.nxv8i16(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -424,6 +444,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -436,6 +457,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.x.v_nxv16f32_nxv16i16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.x.v.nxv16f32.nxv16i16(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -466,6 +488,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -478,6 +501,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.x.v_nxv1f64_nxv1i32(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.x.v.nxv1f64.nxv1i32(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -508,6 +532,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -520,6 +545,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.x.v_nxv2f64_nxv2i32(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.x.v.nxv2f64.nxv2i32(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -550,6 +576,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -562,6 +589,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.x.v_nxv4f64_nxv4i32(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.x.v.nxv4f64.nxv4i32(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -592,6 +620,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -604,6 +633,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.x.v_nxv8f64_nxv8i32(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.x.v.nxv8f64.nxv8i32(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
  <vscale x 1 x half>,
  <vscale x 1 x i8>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x half> @intrinsic_vfwcvt_f.xu.v_nxv1f16_nxv1i8(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv1f16.nxv1i8(
|
||||
<vscale x 1 x half> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
|
||||
<vscale x 2 x half>,
|
||||
<vscale x 2 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x half> @intrinsic_vfwcvt_f.xu.v_nxv2f16_nxv2i8(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv2f16.nxv2i8(
|
||||
<vscale x 2 x half> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -88,6 +92,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
|
||||
<vscale x 4 x half>,
|
||||
<vscale x 4 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -100,6 +105,7 @@ define <vscale x 4 x half> @intrinsic_vfwcvt_f.xu.v_nxv4f16_nxv4i8(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv4f16.nxv4i8(
|
||||
<vscale x 4 x half> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -130,6 +136,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
|
||||
<vscale x 8 x half>,
|
||||
<vscale x 8 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -142,6 +149,7 @@ define <vscale x 8 x half> @intrinsic_vfwcvt_f.xu.v_nxv8f16_nxv8i8(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv8f16.nxv8i8(
|
||||
<vscale x 8 x half> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -172,6 +180,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
|
||||
<vscale x 16 x half>,
|
||||
<vscale x 16 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -184,6 +193,7 @@ define <vscale x 16 x half> @intrinsic_vfwcvt_f.xu.v_nxv16f16_nxv16i8(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv16f16.nxv16i8(
|
||||
<vscale x 16 x half> undef,
|
||||
<vscale x 16 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -214,6 +224,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
|
||||
<vscale x 32 x half>,
|
||||
<vscale x 32 x i8>,
|
||||
iXLen);
|
||||
|
||||
|
@ -226,6 +237,7 @@ define <vscale x 32 x half> @intrinsic_vfwcvt_f.xu.v_nxv32f16_nxv32i8(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x half> @llvm.riscv.vfwcvt.f.xu.v.nxv32f16.nxv32i8(
|
||||
<vscale x 32 x half> undef,
|
||||
<vscale x 32 x i8> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -256,6 +268,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
|
||||
<vscale x 1 x float>,
|
||||
<vscale x 1 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -268,6 +281,7 @@ define <vscale x 1 x float> @intrinsic_vfwcvt_f.xu.v_nxv1f32_nxv1i16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv1f32.nxv1i16(
|
||||
<vscale x 1 x float> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -298,6 +312,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
|
||||
<vscale x 2 x float>,
|
||||
<vscale x 2 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -310,6 +325,7 @@ define <vscale x 2 x float> @intrinsic_vfwcvt_f.xu.v_nxv2f32_nxv2i16(<vscale x 2
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv2f32.nxv2i16(
|
||||
<vscale x 2 x float> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -340,6 +356,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
|
||||
<vscale x 4 x float>,
|
||||
<vscale x 4 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -352,6 +369,7 @@ define <vscale x 4 x float> @intrinsic_vfwcvt_f.xu.v_nxv4f32_nxv4i16(<vscale x 4
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv4f32.nxv4i16(
|
||||
<vscale x 4 x float> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -382,6 +400,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
|
||||
<vscale x 8 x float>,
|
||||
<vscale x 8 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -394,6 +413,7 @@ define <vscale x 8 x float> @intrinsic_vfwcvt_f.xu.v_nxv8f32_nxv8i16(<vscale x 8
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv8f32.nxv8i16(
|
||||
<vscale x 8 x float> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -424,6 +444,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
|
||||
<vscale x 16 x float>,
|
||||
<vscale x 16 x i16>,
|
||||
iXLen);
|
||||
|
||||
|
@ -436,6 +457,7 @@ define <vscale x 16 x float> @intrinsic_vfwcvt_f.xu.v_nxv16f32_nxv16i16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x float> @llvm.riscv.vfwcvt.f.xu.v.nxv16f32.nxv16i16(
|
||||
<vscale x 16 x float> undef,
|
||||
<vscale x 16 x i16> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -466,6 +488,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
|
||||
<vscale x 1 x double>,
|
||||
<vscale x 1 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -478,6 +501,7 @@ define <vscale x 1 x double> @intrinsic_vfwcvt_f.xu.v_nxv1f64_nxv1i32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv1f64.nxv1i32(
|
||||
<vscale x 1 x double> undef,
|
||||
<vscale x 1 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -508,6 +532,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
|
||||
<vscale x 2 x double>,
|
||||
<vscale x 2 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -520,6 +545,7 @@ define <vscale x 2 x double> @intrinsic_vfwcvt_f.xu.v_nxv2f64_nxv2i32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv2f64.nxv2i32(
|
||||
<vscale x 2 x double> undef,
|
||||
<vscale x 2 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -550,6 +576,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
|
||||
<vscale x 4 x double>,
|
||||
<vscale x 4 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -562,6 +589,7 @@ define <vscale x 4 x double> @intrinsic_vfwcvt_f.xu.v_nxv4f64_nxv4i32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv4f64.nxv4i32(
|
||||
<vscale x 4 x double> undef,
|
||||
<vscale x 4 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -592,6 +620,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
|
||||
<vscale x 8 x double>,
|
||||
<vscale x 8 x i32>,
|
||||
iXLen);
|
||||
|
||||
|
@ -604,6 +633,7 @@ define <vscale x 8 x double> @intrinsic_vfwcvt_f.xu.v_nxv8f64_nxv8i32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x double> @llvm.riscv.vfwcvt.f.xu.v.nxv8f64.nxv8i32(
|
||||
<vscale x 8 x double> undef,
|
||||
<vscale x 8 x i32> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i32_nxv1f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i32_nxv2f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -88,6 +92,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -100,6 +105,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i32_nxv4f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i32.nxv4f16(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -130,6 +136,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -142,6 +149,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i32_nxv8f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i32.nxv8f16(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -172,6 +180,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -184,6 +193,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.x.f.v_nxv16i32_nxv16f16(<vscale
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv16i32.nxv16f16(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -214,6 +224,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -226,6 +237,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv1i64_nxv1f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv1i64.nxv1f32(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -256,6 +268,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -268,6 +281,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv2i64_nxv2f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv2i64.nxv2f32(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -298,6 +312,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -310,6 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv4i64_nxv4f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv4i64.nxv4f32(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -340,6 +356,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -352,6 +369,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.x.f.v_nxv8i64_nxv8f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.x.f.v.nxv8i64.nxv8f32(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i32_nxv1f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i32_nxv2f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -88,6 +92,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -100,6 +105,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i32_nxv4f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i32.nxv4f16(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -130,6 +136,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -142,6 +149,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i32_nxv8f16(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i32.nxv8f16(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -172,6 +180,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -184,6 +193,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_rtz.xu.f.v_nxv16i32_nxv16f16(<vscal
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv16i32.nxv16f16(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -214,6 +224,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -226,6 +237,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv1i64_nxv1f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv1i64.nxv1f32(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -256,6 +268,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -268,6 +281,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv2i64_nxv2f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv2i64.nxv2f32(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -298,6 +312,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -310,6 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv4i64_nxv4f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv4i64.nxv4f32(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -340,6 +356,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -352,6 +369,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_rtz.xu.f.v_nxv8i64_nxv8f32(<vscale x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.rtz.xu.f.v.nxv8i64.nxv8f32(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_x.f.v_nxv1i32_nxv1f16(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_x.f.v_nxv2i32_nxv2f16(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -88,6 +92,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -100,6 +105,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_x.f.v_nxv4i32_nxv4f16(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv4i32.nxv4f16(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -130,6 +136,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -142,6 +149,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_x.f.v_nxv8i32_nxv8f16(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv8i32.nxv8f16(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -172,6 +180,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x half>,
|
||||
iXLen);
|
||||
|
||||
|
@ -184,6 +193,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_x.f.v_nxv16i32_nxv16f16(<vscale x 1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.x.f.v.nxv16i32.nxv16f16(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -214,6 +224,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -226,6 +237,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_x.f.v_nxv1i64_nxv1f32(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv1i64.nxv1f32(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -256,6 +268,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -268,6 +281,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_x.f.v_nxv2i64_nxv2f32(<vscale x 2 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv2i64.nxv2f32(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -298,6 +312,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -310,6 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_x.f.v_nxv4i64_nxv4f32(<vscale x 4 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv4i64.nxv4f32(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -340,6 +356,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x float>,
|
||||
iXLen);
|
||||
|
||||
|
@ -352,6 +369,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_x.f.v_nxv8i64_nxv8f32(<vscale x 8 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.x.f.v.nxv8i64.nxv8f32(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x float> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
  <vscale x 1 x i32>,
  <vscale x 1 x half>,
  iXLen);
|
||||
|
||||
|
@ -16,6 +17,7 @@ define <vscale x 1 x i32> @intrinsic_vfwcvt_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv1i32.nxv1f16(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x half> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -46,6 +48,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x half>,
|
||||
iXLen);
|
||||
|
@ -58,6 +61,7 @@ define <vscale x 2 x i32> @intrinsic_vfwcvt_xu.f.v_nxv2i32_nxv2f16(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv2i32.nxv2f16(
<vscale x 2 x i32> undef,
<vscale x 2 x half> %0,
iXLen %1)
@ -88,6 +92,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
<vscale x 4 x i32>,
<vscale x 4 x half>,
iXLen);
@ -100,6 +105,7 @@ define <vscale x 4 x i32> @intrinsic_vfwcvt_xu.f.v_nxv4i32_nxv4f16(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv4i32.nxv4f16(
<vscale x 4 x i32> undef,
<vscale x 4 x half> %0,
iXLen %1)
@ -130,6 +136,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
<vscale x 8 x i32>,
<vscale x 8 x half>,
iXLen);
@ -142,6 +149,7 @@ define <vscale x 8 x i32> @intrinsic_vfwcvt_xu.f.v_nxv8i32_nxv8f16(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv8i32.nxv8f16(
<vscale x 8 x i32> undef,
<vscale x 8 x half> %0,
iXLen %1)
@ -172,6 +180,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
<vscale x 16 x i32>,
<vscale x 16 x half>,
iXLen);
@ -184,6 +193,7 @@ define <vscale x 16 x i32> @intrinsic_vfwcvt_xu.f.v_nxv16i32_nxv16f16(<vscale x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vfwcvt.xu.f.v.nxv16i32.nxv16f16(
<vscale x 16 x i32> undef,
<vscale x 16 x half> %0,
iXLen %1)
@ -214,6 +224,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
<vscale x 1 x i64>,
<vscale x 1 x float>,
iXLen);
@ -226,6 +237,7 @@ define <vscale x 1 x i64> @intrinsic_vfwcvt_xu.f.v_nxv1i64_nxv1f32(<vscale x 1 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv1i64.nxv1f32(
<vscale x 1 x i64> undef,
<vscale x 1 x float> %0,
iXLen %1)
@ -256,6 +268,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
<vscale x 2 x i64>,
<vscale x 2 x float>,
iXLen);
@ -268,6 +281,7 @@ define <vscale x 2 x i64> @intrinsic_vfwcvt_xu.f.v_nxv2i64_nxv2f32(<vscale x 2 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv2i64.nxv2f32(
<vscale x 2 x i64> undef,
<vscale x 2 x float> %0,
iXLen %1)
@ -298,6 +312,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
<vscale x 4 x i64>,
<vscale x 4 x float>,
iXLen);
@ -310,6 +325,7 @@ define <vscale x 4 x i64> @intrinsic_vfwcvt_xu.f.v_nxv4i64_nxv4f32(<vscale x 4 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv4i64.nxv4f32(
<vscale x 4 x i64> undef,
<vscale x 4 x float> %0,
iXLen %1)
@ -340,6 +356,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
<vscale x 8 x i64>,
<vscale x 8 x float>,
iXLen);
@ -352,6 +369,7 @@ define <vscale x 8 x i64> @intrinsic_vfwcvt_xu.f.v_nxv8i64_nxv8f32(<vscale x 8 x
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vfwcvt.xu.f.v.nxv8i64.nxv8f32(
<vscale x 8 x i64> undef,
<vscale x 8 x float> %0,
iXLen %1)
@ -4,6 +4,7 @@
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
; RUN: -verify-machineinstrs | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
<vscale x 1 x i8>,
iXLen);
define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind {
@ -14,6 +15,7 @@ define <vscale x 1 x i8> @intrinsic_vid_v_nxv1i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i8> @llvm.riscv.vid.nxv1i8(
<vscale x 1 x i8> undef,
iXLen %0)
ret <vscale x 1 x i8> %a
@ -40,6 +42,7 @@ entry:
}
declare <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
<vscale x 2 x i8>,
iXLen);
define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind {
@ -50,6 +53,7 @@ define <vscale x 2 x i8> @intrinsic_vid_v_nxv2i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i8> @llvm.riscv.vid.nxv2i8(
<vscale x 2 x i8> undef,
iXLen %0)
ret <vscale x 2 x i8> %a
@ -76,6 +80,7 @@ entry:
}
declare <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
<vscale x 4 x i8>,
iXLen);
define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind {
@ -86,6 +91,7 @@ define <vscale x 4 x i8> @intrinsic_vid_v_nxv4i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i8> @llvm.riscv.vid.nxv4i8(
<vscale x 4 x i8> undef,
iXLen %0)
ret <vscale x 4 x i8> %a
@ -112,6 +118,7 @@ entry:
}
declare <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
<vscale x 8 x i8>,
iXLen);
define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind {
@ -122,6 +129,7 @@ define <vscale x 8 x i8> @intrinsic_vid_v_nxv8i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i8> @llvm.riscv.vid.nxv8i8(
<vscale x 8 x i8> undef,
iXLen %0)
ret <vscale x 8 x i8> %a
@ -148,6 +156,7 @@ entry:
}
declare <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
<vscale x 16 x i8>,
iXLen);
define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind {
@ -158,6 +167,7 @@ define <vscale x 16 x i8> @intrinsic_vid_v_nxv16i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i8> @llvm.riscv.vid.nxv16i8(
<vscale x 16 x i8> undef,
iXLen %0)
ret <vscale x 16 x i8> %a
@ -184,6 +194,7 @@ entry:
}
declare <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
<vscale x 32 x i8>,
iXLen);
define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind {
@ -194,6 +205,7 @@ define <vscale x 32 x i8> @intrinsic_vid_v_nxv32i8(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i8> @llvm.riscv.vid.nxv32i8(
<vscale x 32 x i8> undef,
iXLen %0)
ret <vscale x 32 x i8> %a
@ -220,6 +232,7 @@ entry:
}
declare <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
<vscale x 1 x i16>,
iXLen);
define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind {
@ -230,6 +243,7 @@ define <vscale x 1 x i16> @intrinsic_vid_v_nxv1i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vid.nxv1i16(
<vscale x 1 x i16> undef,
iXLen %0)
ret <vscale x 1 x i16> %a
@ -256,6 +270,7 @@ entry:
}
declare <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
<vscale x 2 x i16>,
iXLen);
define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind {
@ -266,6 +281,7 @@ define <vscale x 2 x i16> @intrinsic_vid_v_nxv2i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i16> @llvm.riscv.vid.nxv2i16(
<vscale x 2 x i16> undef,
iXLen %0)
ret <vscale x 2 x i16> %a
@ -292,6 +308,7 @@ entry:
}
declare <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
<vscale x 4 x i16>,
iXLen);
define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind {
@ -302,6 +319,7 @@ define <vscale x 4 x i16> @intrinsic_vid_v_nxv4i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i16> @llvm.riscv.vid.nxv4i16(
<vscale x 4 x i16> undef,
iXLen %0)
ret <vscale x 4 x i16> %a
@ -328,6 +346,7 @@ entry:
}
declare <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
<vscale x 8 x i16>,
iXLen);
define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind {
@ -338,6 +357,7 @@ define <vscale x 8 x i16> @intrinsic_vid_v_nxv8i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i16> @llvm.riscv.vid.nxv8i16(
<vscale x 8 x i16> undef,
iXLen %0)
ret <vscale x 8 x i16> %a
@ -364,6 +384,7 @@ entry:
}
declare <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
<vscale x 16 x i16>,
iXLen);
define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind {
@ -374,6 +395,7 @@ define <vscale x 16 x i16> @intrinsic_vid_v_nxv16i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i16> @llvm.riscv.vid.nxv16i16(
<vscale x 16 x i16> undef,
iXLen %0)
ret <vscale x 16 x i16> %a
@ -400,6 +422,7 @@ entry:
}
declare <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
<vscale x 32 x i16>,
iXLen);
define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind {
@ -410,6 +433,7 @@ define <vscale x 32 x i16> @intrinsic_vid_v_nxv32i16(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 32 x i16> @llvm.riscv.vid.nxv32i16(
<vscale x 32 x i16> undef,
iXLen %0)
ret <vscale x 32 x i16> %a
@ -436,6 +460,7 @@ entry:
}
declare <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
<vscale x 1 x i32>,
iXLen);
define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind {
@ -446,6 +471,7 @@ define <vscale x 1 x i32> @intrinsic_vid_v_nxv1i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vid.nxv1i32(
<vscale x 1 x i32> undef,
iXLen %0)
ret <vscale x 1 x i32> %a
@ -472,6 +498,7 @@ entry:
}
declare <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
<vscale x 2 x i32>,
iXLen);
define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind {
@ -482,6 +509,7 @@ define <vscale x 2 x i32> @intrinsic_vid_v_nxv2i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i32> @llvm.riscv.vid.nxv2i32(
<vscale x 2 x i32> undef,
iXLen %0)
ret <vscale x 2 x i32> %a
@ -508,6 +536,7 @@ entry:
}
declare <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
<vscale x 4 x i32>,
iXLen);
define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind {
@ -518,6 +547,7 @@ define <vscale x 4 x i32> @intrinsic_vid_v_nxv4i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i32> @llvm.riscv.vid.nxv4i32(
<vscale x 4 x i32> undef,
iXLen %0)
ret <vscale x 4 x i32> %a
@ -544,6 +574,7 @@ entry:
}
declare <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
<vscale x 8 x i32>,
iXLen);
define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind {
@ -554,6 +585,7 @@ define <vscale x 8 x i32> @intrinsic_vid_v_nxv8i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i32> @llvm.riscv.vid.nxv8i32(
<vscale x 8 x i32> undef,
iXLen %0)
ret <vscale x 8 x i32> %a
@ -580,6 +612,7 @@ entry:
}
declare <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
<vscale x 16 x i32>,
iXLen);
define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind {
@ -590,6 +623,7 @@ define <vscale x 16 x i32> @intrinsic_vid_v_nxv16i32(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 16 x i32> @llvm.riscv.vid.nxv16i32(
<vscale x 16 x i32> undef,
iXLen %0)
ret <vscale x 16 x i32> %a
@ -616,6 +650,7 @@ entry:
}
declare <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
<vscale x 1 x i64>,
iXLen);
define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind {
@ -626,6 +661,7 @@ define <vscale x 1 x i64> @intrinsic_vid_v_nxv1i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vid.nxv1i64(
<vscale x 1 x i64> undef,
iXLen %0)
ret <vscale x 1 x i64> %a
@ -652,6 +688,7 @@ entry:
}
declare <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
<vscale x 2 x i64>,
iXLen);
define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind {
@ -662,6 +699,7 @@ define <vscale x 2 x i64> @intrinsic_vid_v_nxv2i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 2 x i64> @llvm.riscv.vid.nxv2i64(
<vscale x 2 x i64> undef,
iXLen %0)
ret <vscale x 2 x i64> %a
@ -688,6 +726,7 @@ entry:
}
declare <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
<vscale x 4 x i64>,
iXLen);
define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind {
@ -698,6 +737,7 @@ define <vscale x 4 x i64> @intrinsic_vid_v_nxv4i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 4 x i64> @llvm.riscv.vid.nxv4i64(
<vscale x 4 x i64> undef,
iXLen %0)
ret <vscale x 4 x i64> %a
@ -724,6 +764,7 @@ entry:
}
declare <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
<vscale x 8 x i64>,
iXLen);
define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind {
@ -734,6 +775,7 @@ define <vscale x 8 x i64> @intrinsic_vid_v_nxv8i64(iXLen %0) nounwind {
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i64> @llvm.riscv.vid.nxv8i64(
<vscale x 8 x i64> undef,
iXLen %0)
ret <vscale x 8 x i64> %a
@ -4,6 +4,7 @@
|
|||
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v \
|
||||
; RUN: -verify-machineinstrs | FileCheck %s
|
||||
declare <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
|
||||
<vscale x 1 x i8>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -15,6 +16,7 @@ define <vscale x 1 x i8> @intrinsic_viota_m_nxv1i8_nxv1i1(<vscale x 1 x i1> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i8> @llvm.riscv.viota.nxv1i8(
|
||||
<vscale x 1 x i8> undef,
|
||||
<vscale x 1 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -44,6 +46,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
|
||||
<vscale x 2 x i8>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -55,6 +58,7 @@ define <vscale x 2 x i8> @intrinsic_viota_m_nxv2i8_nxv2i1(<vscale x 2 x i1> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i8> @llvm.riscv.viota.nxv2i8(
|
||||
<vscale x 2 x i8> undef,
|
||||
<vscale x 2 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -84,6 +88,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
|
||||
<vscale x 4 x i8>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -95,6 +100,7 @@ define <vscale x 4 x i8> @intrinsic_viota_m_nxv4i8_nxv4i1(<vscale x 4 x i1> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i8> @llvm.riscv.viota.nxv4i8(
|
||||
<vscale x 4 x i8> undef,
|
||||
<vscale x 4 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -124,6 +130,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
|
||||
<vscale x 8 x i8>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -135,6 +142,7 @@ define <vscale x 8 x i8> @intrinsic_viota_m_nxv8i8_nxv8i1(<vscale x 8 x i1> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i8> @llvm.riscv.viota.nxv8i8(
|
||||
<vscale x 8 x i8> undef,
|
||||
<vscale x 8 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -164,6 +172,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
|
||||
<vscale x 16 x i8>,
|
||||
<vscale x 16 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -175,6 +184,7 @@ define <vscale x 16 x i8> @intrinsic_viota_m_nxv16i8_nxv16i1(<vscale x 16 x i1>
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i8> @llvm.riscv.viota.nxv16i8(
|
||||
<vscale x 16 x i8> undef,
|
||||
<vscale x 16 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -204,6 +214,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
|
||||
<vscale x 32 x i8>,
|
||||
<vscale x 32 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -215,6 +226,7 @@ define <vscale x 32 x i8> @intrinsic_viota_m_nxv32i8_nxv32i1(<vscale x 32 x i1>
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i8> @llvm.riscv.viota.nxv32i8(
|
||||
<vscale x 32 x i8> undef,
|
||||
<vscale x 32 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -244,6 +256,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
|
||||
<vscale x 64 x i8>,
|
||||
<vscale x 64 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -255,6 +268,7 @@ define <vscale x 64 x i8> @intrinsic_viota_m_nxv64i8_nxv64i1(<vscale x 64 x i1>
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 64 x i8> @llvm.riscv.viota.nxv64i8(
|
||||
<vscale x 64 x i8> undef,
|
||||
<vscale x 64 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -284,6 +298,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -295,6 +310,7 @@ define <vscale x 1 x i16> @intrinsic_viota_m_nxv1i16_nxv1i1(<vscale x 1 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.viota.nxv1i16(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -324,6 +340,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -335,6 +352,7 @@ define <vscale x 2 x i16> @intrinsic_viota_m_nxv2i16_nxv2i1(<vscale x 2 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.viota.nxv2i16(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -364,6 +382,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -375,6 +394,7 @@ define <vscale x 4 x i16> @intrinsic_viota_m_nxv4i16_nxv4i1(<vscale x 4 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.viota.nxv4i16(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -404,6 +424,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -415,6 +436,7 @@ define <vscale x 8 x i16> @intrinsic_viota_m_nxv8i16_nxv8i1(<vscale x 8 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.viota.nxv8i16(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -444,6 +466,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -455,6 +478,7 @@ define <vscale x 16 x i16> @intrinsic_viota_m_nxv16i16_nxv16i1(<vscale x 16 x i1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.viota.nxv16i16(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -484,6 +508,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -495,6 +520,7 @@ define <vscale x 32 x i16> @intrinsic_viota_m_nxv32i16_nxv32i1(<vscale x 32 x i1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.viota.nxv32i16(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -524,6 +550,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -535,6 +562,7 @@ define <vscale x 1 x i32> @intrinsic_viota_m_nxv1i32_nxv1i1(<vscale x 1 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.viota.nxv1i32(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -564,6 +592,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -575,6 +604,7 @@ define <vscale x 2 x i32> @intrinsic_viota_m_nxv2i32_nxv2i1(<vscale x 2 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.viota.nxv2i32(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -604,6 +634,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -615,6 +646,7 @@ define <vscale x 4 x i32> @intrinsic_viota_m_nxv4i32_nxv4i1(<vscale x 4 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.viota.nxv4i32(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -644,6 +676,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -655,6 +688,7 @@ define <vscale x 8 x i32> @intrinsic_viota_m_nxv8i32_nxv8i1(<vscale x 8 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.viota.nxv8i32(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -684,6 +718,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -695,6 +730,7 @@ define <vscale x 16 x i32> @intrinsic_viota_m_nxv16i32_nxv16i1(<vscale x 16 x i1
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.viota.nxv16i32(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -724,6 +760,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -735,6 +772,7 @@ define <vscale x 1 x i64> @intrinsic_viota_m_nxv1i64_nxv1i1(<vscale x 1 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.viota.nxv1i64(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -764,6 +802,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -775,6 +814,7 @@ define <vscale x 2 x i64> @intrinsic_viota_m_nxv2i64_nxv2i1(<vscale x 2 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.viota.nxv2i64(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -804,6 +844,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -815,6 +856,7 @@ define <vscale x 4 x i64> @intrinsic_viota_m_nxv4i64_nxv4i1(<vscale x 4 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.viota.nxv4i64(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i1> %0,
|
||||
iXLen %1)
|
||||
|
||||
|
@ -844,6 +886,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i1>,
|
||||
iXLen);
|
||||
|
||||
|
@ -855,6 +898,7 @@ define <vscale x 8 x i64> @intrinsic_viota_m_nxv8i64_nxv8i1(<vscale x 8 x i1> %0
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.viota.nxv8i64(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i1> %0,
|
||||
iXLen %1)
@ -34,11 +34,11 @@
br i1 %tobool, label %if.else, label %if.then
if.then: ; preds = %entry
%b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %2)
%b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
br label %if.end
if.else: ; preds = %entry
%c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %2)
%c = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %2)
br label %if.end
if.end: ; preds = %if.else, %if.then
@ -140,10 +140,10 @@
declare void @llvm.riscv.vse.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>* nocapture, i64) #4
; Function Attrs: nounwind readnone
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
; Function Attrs: nounwind readnone
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
attributes #0 = { "target-features"="+v" }
attributes #1 = { nounwind readnone }
@ -24,7 +24,7 @@
define <vscale x 1 x i64> @load_zext(<vscale x 1 x i32>* %0, i64 %1) #0 {
entry:
%a = call <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32>* %0, i64 %1)
%b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32> %a, i64 %1)
%b = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> undef, <vscale x 1 x i32> %a, i64 %1)
ret <vscale x 1 x i64> %b
}
@ -82,7 +82,7 @@
declare <vscale x 1 x i32> @llvm.riscv.vle.nxv1i32.i64(<vscale x 1 x i32>, <vscale x 1 x i32>* nocapture, i64) #4
; Function Attrs: nounwind readnone
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i32>, i64) #1
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32.i64(<vscale x 1 x i64>, <vscale x 1 x i32>, i64) #1
attributes #0 = { "target-features"="+v" }
attributes #1 = { nounwind readnone }
@ -2,6 +2,7 @@
|
|||
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
|
||||
; RUN: < %s | FileCheck %s
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -14,6 +15,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -44,6 +46,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -98,6 +103,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -128,6 +134,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -140,6 +147,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -170,6 +178,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -182,6 +191,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -212,6 +222,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -224,6 +235,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -254,6 +266,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -266,6 +279,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -296,6 +310,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -308,6 +323,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -338,6 +354,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -350,6 +367,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -380,6 +398,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -392,6 +411,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -422,6 +442,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -434,6 +455,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -464,6 +486,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -476,6 +499,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -506,6 +530,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -518,6 +543,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -548,6 +574,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -560,6 +587,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -590,6 +618,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -602,6 +631,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -632,6 +662,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -644,6 +675,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -674,6 +706,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -686,6 +719,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -716,6 +750,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -728,6 +763,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -758,6 +794,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -770,6 +807,7 @@ define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -800,6 +838,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -812,6 +851,7 @@ define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -842,6 +882,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -854,6 +895,7 @@ define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -884,6 +926,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -896,6 +939,7 @@ define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -926,6 +970,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -938,6 +983,7 @@ define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -968,6 +1014,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -980,6 +1027,7 @@ define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
|
||||
; RUN: < %s | FileCheck %s
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -14,6 +15,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -44,6 +46,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf8_nxv2i64(<vscale x 2 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i8(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -98,6 +103,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf8_nxv4i64(<vscale x 4 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i8(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -128,6 +134,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -140,6 +147,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf8_nxv8i64(<vscale x 8 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i8(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -170,6 +178,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -182,6 +191,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf4_nxv1i64(<vscale x 1 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i16(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -212,6 +222,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -224,6 +235,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf4_nxv2i64(<vscale x 2 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i16(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -254,6 +266,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -266,6 +279,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf4_nxv4i64(<vscale x 4 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i16(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -296,6 +310,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -308,6 +323,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf4_nxv8i64(<vscale x 8 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i16(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -338,6 +354,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -350,6 +367,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf4_nxv1i32(<vscale x 1 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i8(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -380,6 +398,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -392,6 +411,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf4_nxv2i32(<vscale x 2 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i8(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -422,6 +442,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -434,6 +455,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf4_nxv4i32(<vscale x 4 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i8(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -464,6 +486,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -476,6 +499,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf4_nxv8i32(<vscale x 8 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i8(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -506,6 +530,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -518,6 +543,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf4_nxv16i32(<vscale x 16 x i8> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i8(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -548,6 +574,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i32>,
|
||||
i64);
|
||||
|
||||
|
@ -560,6 +587,7 @@ define <vscale x 1 x i64> @intrinsic_vsext_vf2_nxv1i64(<vscale x 1 x i32> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vsext.nxv1i64.nxv1i32(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i32> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -590,6 +618,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i32>,
|
||||
i64);
|
||||
|
||||
|
@ -602,6 +631,7 @@ define <vscale x 2 x i64> @intrinsic_vsext_vf2_nxv2i64(<vscale x 2 x i32> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vsext.nxv2i64.nxv2i32(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i32> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -632,6 +662,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i32>,
|
||||
i64);
|
||||
|
||||
|
@ -644,6 +675,7 @@ define <vscale x 4 x i64> @intrinsic_vsext_vf2_nxv4i64(<vscale x 4 x i32> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vsext.nxv4i64.nxv4i32(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i32> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -674,6 +706,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i32>,
|
||||
i64);
|
||||
|
||||
|
@ -686,6 +719,7 @@ define <vscale x 8 x i64> @intrinsic_vsext_vf2_nxv8i64(<vscale x 8 x i32> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vsext.nxv8i64.nxv8i32(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i32> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -716,6 +750,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
|
||||
<vscale x 1 x i32>,
|
||||
<vscale x 1 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -728,6 +763,7 @@ define <vscale x 1 x i32> @intrinsic_vsext_vf2_nxv1i32(<vscale x 1 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i32> @llvm.riscv.vsext.nxv1i32.nxv1i16(
|
||||
<vscale x 1 x i32> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -758,6 +794,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
|
||||
<vscale x 2 x i32>,
|
||||
<vscale x 2 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -770,6 +807,7 @@ define <vscale x 2 x i32> @intrinsic_vsext_vf2_nxv2i32(<vscale x 2 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i32> @llvm.riscv.vsext.nxv2i32.nxv2i16(
|
||||
<vscale x 2 x i32> undef,
|
||||
<vscale x 2 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -800,6 +838,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
|
||||
<vscale x 4 x i32>,
|
||||
<vscale x 4 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -812,6 +851,7 @@ define <vscale x 4 x i32> @intrinsic_vsext_vf2_nxv4i32(<vscale x 4 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i32> @llvm.riscv.vsext.nxv4i32.nxv4i16(
|
||||
<vscale x 4 x i32> undef,
|
||||
<vscale x 4 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -842,6 +882,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
|
||||
<vscale x 8 x i32>,
|
||||
<vscale x 8 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -854,6 +895,7 @@ define <vscale x 8 x i32> @intrinsic_vsext_vf2_nxv8i32(<vscale x 8 x i16> %0, i6
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i32> @llvm.riscv.vsext.nxv8i32.nxv8i16(
|
||||
<vscale x 8 x i32> undef,
|
||||
<vscale x 8 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -884,6 +926,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
|
||||
<vscale x 16 x i32>,
|
||||
<vscale x 16 x i16>,
|
||||
i64);
|
||||
|
||||
|
@ -896,6 +939,7 @@ define <vscale x 16 x i32> @intrinsic_vsext_vf2_nxv16i32(<vscale x 16 x i16> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i32> @llvm.riscv.vsext.nxv16i32.nxv16i16(
|
||||
<vscale x 16 x i32> undef,
|
||||
<vscale x 16 x i16> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -926,6 +970,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
|
||||
<vscale x 1 x i16>,
|
||||
<vscale x 1 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -938,6 +983,7 @@ define <vscale x 1 x i16> @intrinsic_vsext_vf2_nxv1i16(<vscale x 1 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i16> @llvm.riscv.vsext.nxv1i16.nxv1i8(
|
||||
<vscale x 1 x i16> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -968,6 +1014,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
|
||||
<vscale x 2 x i16>,
|
||||
<vscale x 2 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -980,6 +1027,7 @@ define <vscale x 2 x i16> @intrinsic_vsext_vf2_nxv2i16(<vscale x 2 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i16> @llvm.riscv.vsext.nxv2i16.nxv2i8(
|
||||
<vscale x 2 x i16> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -1010,6 +1058,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
|
||||
<vscale x 4 x i16>,
|
||||
<vscale x 4 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -1022,6 +1071,7 @@ define <vscale x 4 x i16> @intrinsic_vsext_vf2_nxv4i16(<vscale x 4 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i16> @llvm.riscv.vsext.nxv4i16.nxv4i8(
|
||||
<vscale x 4 x i16> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -1052,6 +1102,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
|
||||
<vscale x 8 x i16>,
|
||||
<vscale x 8 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -1064,6 +1115,7 @@ define <vscale x 8 x i16> @intrinsic_vsext_vf2_nxv8i16(<vscale x 8 x i8> %0, i64
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i16> @llvm.riscv.vsext.nxv8i16.nxv8i8(
|
||||
<vscale x 8 x i16> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -1094,6 +1146,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
|
||||
<vscale x 16 x i16>,
|
||||
<vscale x 16 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -1106,6 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vsext_vf2_nxv16i16(<vscale x 16 x i8> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 16 x i16> @llvm.riscv.vsext.nxv16i16.nxv16i8(
|
||||
<vscale x 16 x i16> undef,
|
||||
<vscale x 16 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
@ -1136,6 +1190,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
|
||||
<vscale x 32 x i16>,
|
||||
<vscale x 32 x i8>,
|
||||
i64);
|
||||
|
||||
|
@ -1148,6 +1203,7 @@ define <vscale x 32 x i16> @intrinsic_vsext_vf2_nxv32i16(<vscale x 32 x i8> %0,
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 32 x i16> @llvm.riscv.vsext.nxv32i16.nxv32i8(
|
||||
<vscale x 32 x i16> undef,
|
||||
<vscale x 32 x i8> %0,
|
||||
i64 %1)
|
||||
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs \
|
||||
; RUN: < %s | FileCheck %s
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -14,6 +15,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -44,6 +46,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -56,6 +59,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
|
||||
<vscale x 2 x i64> undef,
|
||||
<vscale x 2 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -86,6 +90,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
|
||||
<vscale x 4 x i64>,
|
||||
<vscale x 4 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -98,6 +103,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
|
||||
<vscale x 4 x i64> undef,
|
||||
<vscale x 4 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -128,6 +134,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
|
||||
<vscale x 8 x i64>,
|
||||
<vscale x 8 x i8>,
|
||||
i32);
|
||||
|
||||
|
@ -140,6 +147,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i32
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
|
||||
<vscale x 8 x i64> undef,
|
||||
<vscale x 8 x i8> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -170,6 +178,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
|
||||
<vscale x 1 x i64>,
|
||||
<vscale x 1 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -182,6 +191,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i3
|
|||
; CHECK-NEXT: ret
|
||||
entry:
|
||||
%a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
|
||||
<vscale x 1 x i64> undef,
|
||||
<vscale x 1 x i16> %0,
|
||||
i32 %1)
|
||||
|
||||
|
@ -212,6 +222,7 @@ entry:
|
|||
}
|
||||
|
||||
declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
|
||||
<vscale x 2 x i64>,
|
||||
<vscale x 2 x i16>,
|
||||
i32);
|
||||
|
||||
|
@ -224,6 +235,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i16> %0,
    i32 %1)

@ -254,6 +266,7 @@ entry:
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  i32);

@ -266,6 +279,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i16> %0,
    i32 %1)

@ -296,6 +310,7 @@ entry:
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  i32);

@ -308,6 +323,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i16> %0,
    i32 %1)

@ -338,6 +354,7 @@ entry:
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  <vscale x 1 x i8>,
  i32);

@ -350,6 +367,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i8> %0,
    i32 %1)

@ -380,6 +398,7 @@ entry:
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  <vscale x 2 x i8>,
  i32);

@ -392,6 +411,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i8> %0,
    i32 %1)

@ -422,6 +442,7 @@ entry:
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  <vscale x 4 x i8>,
  i32);

@ -434,6 +455,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i8> %0,
    i32 %1)

@ -464,6 +486,7 @@ entry:
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  i32);

@ -476,6 +499,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i8> %0,
    i32 %1)

@ -506,6 +530,7 @@ entry:
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  <vscale x 16 x i8>,
  i32);

@ -518,6 +543,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i8> %0,
    i32 %1)

@ -548,6 +574,7 @@ entry:
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i32);
@ -560,6 +587,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    i32 %1)

@ -590,6 +618,7 @@ entry:
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i32);

@ -602,6 +631,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i32> %0,
    i32 %1)

@ -632,6 +662,7 @@ entry:
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i32);

@ -644,6 +675,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i32> %0,
    i32 %1)

@ -674,6 +706,7 @@ entry:
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i32);

@ -686,6 +719,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i32> %0,
    i32 %1)

@ -716,6 +750,7 @@ entry:
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i32);

@ -728,6 +763,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i16> %0,
    i32 %1)

@ -758,6 +794,7 @@ entry:
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i32);

@ -770,6 +807,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i16> %0,
    i32 %1)

@ -800,6 +838,7 @@ entry:
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i32);

@ -812,6 +851,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i16> %0,
    i32 %1)

@ -842,6 +882,7 @@ entry:
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i32);

@ -854,6 +895,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i3
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i16> %0,
    i32 %1)

@ -884,6 +926,7 @@ entry:
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i32);
@ -896,6 +939,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i16> %0,
    i32 %1)

@ -926,6 +970,7 @@ entry:
}

declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i32);

@ -938,6 +983,7 @@ define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    i32 %1)

@ -968,6 +1014,7 @@ entry:
}

declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i32);

@ -980,6 +1027,7 @@ define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i8> %0,
    i32 %1)

@ -1010,6 +1058,7 @@ entry:
}

declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i32);

@ -1022,6 +1071,7 @@ define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i8> %0,
    i32 %1)

@ -1052,6 +1102,7 @@ entry:
}

declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i32);

@ -1064,6 +1115,7 @@ define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i32
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i8> %0,
    i32 %1)

@ -1094,6 +1146,7 @@ entry:
}

declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i32);

@ -1106,6 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i8> %0,
    i32 %1)

@ -1136,6 +1190,7 @@ entry:
}

declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i32);

@ -1148,6 +1203,7 @@ define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i8> %0,
    i32 %1)
@ -2,6 +2,7 @@
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN: < %s | FileCheck %s
declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
  <vscale x 1 x i64>,
  <vscale x 1 x i8>,
  i64);

@ -14,6 +15,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf8_nxv1i64(<vscale x 1 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i8(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i8> %0,
    i64 %1)

@ -44,6 +46,7 @@ entry:
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
  <vscale x 2 x i64>,
  <vscale x 2 x i8>,
  i64);

@ -56,6 +59,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf8_nxv2i64(<vscale x 2 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i8(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i8> %0,
    i64 %1)

@ -86,6 +90,7 @@ entry:
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
  <vscale x 4 x i64>,
  <vscale x 4 x i8>,
  i64);

@ -98,6 +103,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf8_nxv4i64(<vscale x 4 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i8(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i8> %0,
    i64 %1)

@ -128,6 +134,7 @@ entry:
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
  <vscale x 8 x i64>,
  <vscale x 8 x i8>,
  i64);

@ -140,6 +147,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf8_nxv8i64(<vscale x 8 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i8(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i8> %0,
    i64 %1)

@ -170,6 +178,7 @@ entry:
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
  <vscale x 1 x i64>,
  <vscale x 1 x i16>,
  i64);

@ -182,6 +191,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf4_nxv1i64(<vscale x 1 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i16(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i16> %0,
    i64 %1)

@ -212,6 +222,7 @@ entry:
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
  <vscale x 2 x i64>,
  <vscale x 2 x i16>,
  i64);

@ -224,6 +235,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf4_nxv2i64(<vscale x 2 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i16(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i16> %0,
    i64 %1)

@ -254,6 +266,7 @@ entry:
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
  <vscale x 4 x i64>,
  <vscale x 4 x i16>,
  i64);

@ -266,6 +279,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf4_nxv4i64(<vscale x 4 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i16(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i16> %0,
    i64 %1)

@ -296,6 +310,7 @@ entry:
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
  <vscale x 8 x i64>,
  <vscale x 8 x i16>,
  i64);

@ -308,6 +323,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf4_nxv8i64(<vscale x 8 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i16(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i16> %0,
    i64 %1)

@ -338,6 +354,7 @@ entry:
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
  <vscale x 1 x i32>,
  <vscale x 1 x i8>,
  i64);
@ -350,6 +367,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf4_nxv1i32(<vscale x 1 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i8(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i8> %0,
    i64 %1)

@ -380,6 +398,7 @@ entry:
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
  <vscale x 2 x i32>,
  <vscale x 2 x i8>,
  i64);

@ -392,6 +411,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf4_nxv2i32(<vscale x 2 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i8(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i8> %0,
    i64 %1)

@ -422,6 +442,7 @@ entry:
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
  <vscale x 4 x i32>,
  <vscale x 4 x i8>,
  i64);

@ -434,6 +455,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf4_nxv4i32(<vscale x 4 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i8(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i8> %0,
    i64 %1)

@ -464,6 +486,7 @@ entry:
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
  <vscale x 8 x i32>,
  <vscale x 8 x i8>,
  i64);

@ -476,6 +499,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf4_nxv8i32(<vscale x 8 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i8(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i8> %0,
    i64 %1)

@ -506,6 +530,7 @@ entry:
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
  <vscale x 16 x i32>,
  <vscale x 16 x i8>,
  i64);

@ -518,6 +543,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf4_nxv16i32(<vscale x 16 x i8> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i8(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i8> %0,
    i64 %1)

@ -548,6 +574,7 @@ entry:
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i64);

@ -560,6 +587,7 @@ define <vscale x 1 x i64> @intrinsic_vzext_vf2_nxv1i64(<vscale x 1 x i32> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.nxv1i64.nxv1i32(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i32> %0,
    i64 %1)

@ -590,6 +618,7 @@ entry:
}

declare <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i64);

@ -602,6 +631,7 @@ define <vscale x 2 x i64> @intrinsic_vzext_vf2_nxv2i64(<vscale x 2 x i32> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vzext.nxv2i64.nxv2i32(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i32> %0,
    i64 %1)

@ -632,6 +662,7 @@ entry:
}

declare <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i64);

@ -644,6 +675,7 @@ define <vscale x 4 x i64> @intrinsic_vzext_vf2_nxv4i64(<vscale x 4 x i32> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vzext.nxv4i64.nxv4i32(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i32> %0,
    i64 %1)

@ -674,6 +706,7 @@ entry:
}

declare <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i64);

@ -686,6 +719,7 @@ define <vscale x 8 x i64> @intrinsic_vzext_vf2_nxv8i64(<vscale x 8 x i32> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vzext.nxv8i64.nxv8i32(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i32> %0,
    i64 %1)

@ -716,6 +750,7 @@ entry:
}

declare <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i64);
@ -728,6 +763,7 @@ define <vscale x 1 x i32> @intrinsic_vzext_vf2_nxv1i32(<vscale x 1 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vzext.nxv1i32.nxv1i16(
    <vscale x 1 x i32> undef,
    <vscale x 1 x i16> %0,
    i64 %1)

@ -758,6 +794,7 @@ entry:
}

declare <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i64);

@ -770,6 +807,7 @@ define <vscale x 2 x i32> @intrinsic_vzext_vf2_nxv2i32(<vscale x 2 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vzext.nxv2i32.nxv2i16(
    <vscale x 2 x i32> undef,
    <vscale x 2 x i16> %0,
    i64 %1)

@ -800,6 +838,7 @@ entry:
}

declare <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i64);

@ -812,6 +851,7 @@ define <vscale x 4 x i32> @intrinsic_vzext_vf2_nxv4i32(<vscale x 4 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vzext.nxv4i32.nxv4i16(
    <vscale x 4 x i32> undef,
    <vscale x 4 x i16> %0,
    i64 %1)

@ -842,6 +882,7 @@ entry:
}

declare <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i64);

@ -854,6 +895,7 @@ define <vscale x 8 x i32> @intrinsic_vzext_vf2_nxv8i32(<vscale x 8 x i16> %0, i6
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vzext.nxv8i32.nxv8i16(
    <vscale x 8 x i32> undef,
    <vscale x 8 x i16> %0,
    i64 %1)

@ -884,6 +926,7 @@ entry:
}

declare <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i64);

@ -896,6 +939,7 @@ define <vscale x 16 x i32> @intrinsic_vzext_vf2_nxv16i32(<vscale x 16 x i16> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vzext.nxv16i32.nxv16i16(
    <vscale x 16 x i32> undef,
    <vscale x 16 x i16> %0,
    i64 %1)

@ -926,6 +970,7 @@ entry:
}

declare <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i64);

@ -938,6 +983,7 @@ define <vscale x 1 x i16> @intrinsic_vzext_vf2_nxv1i16(<vscale x 1 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vzext.nxv1i16.nxv1i8(
    <vscale x 1 x i16> undef,
    <vscale x 1 x i8> %0,
    i64 %1)

@ -968,6 +1014,7 @@ entry:
}

declare <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i64);

@ -980,6 +1027,7 @@ define <vscale x 2 x i16> @intrinsic_vzext_vf2_nxv2i16(<vscale x 2 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vzext.nxv2i16.nxv2i8(
    <vscale x 2 x i16> undef,
    <vscale x 2 x i8> %0,
    i64 %1)

@ -1010,6 +1058,7 @@ entry:
}

declare <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i64);

@ -1022,6 +1071,7 @@ define <vscale x 4 x i16> @intrinsic_vzext_vf2_nxv4i16(<vscale x 4 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vzext.nxv4i16.nxv4i8(
    <vscale x 4 x i16> undef,
    <vscale x 4 x i8> %0,
    i64 %1)

@ -1052,6 +1102,7 @@ entry:
}

declare <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i64);

@ -1064,6 +1115,7 @@ define <vscale x 8 x i16> @intrinsic_vzext_vf2_nxv8i16(<vscale x 8 x i8> %0, i64
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vzext.nxv8i16.nxv8i8(
    <vscale x 8 x i16> undef,
    <vscale x 8 x i8> %0,
    i64 %1)

@ -1094,6 +1146,7 @@ entry:
}

declare <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i64);
@ -1106,6 +1159,7 @@ define <vscale x 16 x i16> @intrinsic_vzext_vf2_nxv16i16(<vscale x 16 x i8> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vzext.nxv16i16.nxv16i8(
    <vscale x 16 x i16> undef,
    <vscale x 16 x i8> %0,
    i64 %1)

@ -1136,6 +1190,7 @@ entry:
}

declare <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i64);

@ -1148,6 +1203,7 @@ define <vscale x 32 x i16> @intrinsic_vzext_vf2_nxv32i16(<vscale x 32 x i8> %0,
; CHECK-NEXT: ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vzext.nxv32i16.nxv32i8(
    <vscale x 32 x i16> undef,
    <vscale x 32 x i8> %0,
    i64 %1)