From 62c94f06781dba1a3ec717c1486934de2e939052 Mon Sep 17 00:00:00 2001
From: Hsiangkai Wang
Date: Sat, 19 Dec 2020 21:46:29 +0800
Subject: [PATCH] [RISCV] Define vector vfmul/vfdiv/vfrdiv intrinsics.

Define vector vfmul/vfdiv/vfrdiv intrinsics and lower them to V instructions.

We worked with @rogfer01 from BSC to produce this patch.

Authored-by: Roger Ferrer Ibanez
Co-Authored-by: Hsiangkai Wang

Differential Revision: https://reviews.llvm.org/D93580
---
 llvm/include/llvm/IR/IntrinsicsRISCV.td    |    4 +
 .../Target/RISCV/RISCVInstrInfoVPseudos.td |   14 +
 llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll  |  881 ++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll  | 1201 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll  |  881 ++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll  | 1201 +++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll |  441 ++++++
 llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll |  601 +++++++++
 8 files changed, 5224 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index c5f2dacb100a..497a712bcaf9 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -413,4 +413,8 @@ let TargetPrefix = "riscv" in {
                     [LLVMMatchType<0>, LLVMVectorElementType<0>, llvm_anyint_ty],
                     [IntrNoMem]>, RISCVVIntrinsic;
+
+  defm vfmul : RISCVBinaryAAX;
+  defm vfdiv : RISCVBinaryAAX;
+  defm vfrdiv : RISCVBinaryAAX;
 } // TargetPrefix = "riscv"
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 8c5973a19e4b..f765e28585a8 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1500,6 +1500,13 @@ defm PseudoVFADD : VPseudoBinaryV_VV_VX;
 defm PseudoVFSUB : VPseudoBinaryV_VV_VX;
 defm PseudoVFRSUB : VPseudoBinaryV_VX;
+//===----------------------------------------------------------------------===//
+// 14.4. Vector Single-Width Floating-Point Multiply/Divide Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFMUL : VPseudoBinaryV_VV_VX;
+defm PseudoVFDIV : VPseudoBinaryV_VV_VX;
+defm PseudoVFRDIV : VPseudoBinaryV_VX;
+
 } // Predicates = [HasStdExtV, HasStdExtF]
 //===----------------------------------------------------------------------===//
@@ -1764,6 +1771,13 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
 defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
+//===----------------------------------------------------------------------===//
+// 14.4. 
Vector Single-Width Floating-Point Multiply/Divide Instructions +//===----------------------------------------------------------------------===// +defm "" : VPatBinaryV_VV_VX<"int_riscv_vfmul", "PseudoVFMUL", AllFloatVectors>; +defm "" : VPatBinaryV_VV_VX<"int_riscv_vfdiv", "PseudoVFDIV", AllFloatVectors>; +defm "" : VPatBinaryV_VX<"int_riscv_vfrdiv", "PseudoVFRDIV", AllFloatVectors>; + } // Predicates = [HasStdExtV, HasStdExtF] //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll new file mode 100644 index 000000000000..3187dd573cba --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfdiv.nxv1f16( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f16( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f16( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f16( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f16( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv32f16( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv32f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv32f16( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv32f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f32( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f32( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f32( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f32( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f32( + , + , + i32); + +define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f32( + , + , + , + , + i32); + +define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfdiv_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfdiv_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: 
+; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfdiv_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfdiv_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfdiv_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfdiv_vf_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret 
%a +} + +declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfdiv_vf_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfdiv_vf_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfdiv_vf_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfdiv_vf_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfdiv_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfdiv_vf_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll new file mode 100644 index 000000000000..438098d3201d --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfdiv.nxv1f16( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f16( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f16( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f16( + , + , + , + , + i64); + +define 
@intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f16( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f16( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv32f16( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv32f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv32f16( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv32f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f32( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a 
+} + +declare @llvm.riscv.vfdiv.nxv2f32( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f32( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f32( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f32( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f32( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f64( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f64( + %0, + %1, + i64 
%2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f64( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f64( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f64( + , + , + i64); + +define @intrinsic_vfdiv_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vv_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f64( + , + , + , + , + i64); + +define @intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfdiv.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfdiv_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, 
v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfdiv_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfdiv_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfdiv_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfdiv_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfdiv_vf_nxv32f16_f16( %0, half %1, i64 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfdiv_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfdiv_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfdiv_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfdiv_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f32.f32( + 
%0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfdiv_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfdiv_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfdiv.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_vf_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfdiv.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfdiv.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfdiv_mask_vf_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfdiv.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll new file mode 100644 index 000000000000..438bf82a4691 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv32.ll @@ -0,0 +1,881 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmul.nxv1f16( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f16( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f16( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f16( + , + , + , + , + i32); + +define 
@intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f16( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f16( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32f16( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv32f16_nxv32f16( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv32f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32f16( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv32f16( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f32( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a 
+} + +declare @llvm.riscv.vfmul.nxv2f32( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f32( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f32( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f32( + , + , + i32); + +define @intrinsic_vfmul_vv_nxv16f32_nxv16f32( %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f32( + , + , + , + , + i32); + +define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f32( + %0, + %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfmul_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f16.f16( + %0, + 
half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfmul_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfmul_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfmul_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfmul_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmul_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfmul_vf_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfmul_vf_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfmul_vf_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfmul_vf_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( + %0, + %1, + float 
%2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfmul_vf_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f32.f32( + , + float, + i32); + +define @intrinsic_vfmul_vf_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll new file mode 100644 index 000000000000..a0e3517c6149 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-rv64.ll @@ -0,0 +1,1201 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfmul.nxv1f16( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f16( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f16_nxv2f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, 
{{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f16( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f16_nxv4f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f16( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f16_nxv8f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f16( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv16f16_nxv16f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32f16( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv32f16_nxv32f16( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv32f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32f16( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv32f16_nxv32f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv32f16( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f32( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f32_nxv1f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f32( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f32_nxv2f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f32( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f32_nxv4f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f32( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f32_nxv8f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f32( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv16f32_nxv16f32( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f32( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmul_mask_vv_nxv16f32_nxv16f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f32( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f64( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f64_nxv1f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f64( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv2f64_nxv2f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f64( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv4f64_nxv4f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f64( + , + , + i64); + +define @intrinsic_vfmul_vv_nxv8f64_nxv8f64( %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vv_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f64( + , + , + , + , + i64); + +define @intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64( %0, %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv8f64_nxv8f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmul.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f64( + %0, + %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfmul_vf_nxv1f16_f16( %0, half %1, 
i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfmul_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfmul_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfmul_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfmul_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f16.f16( + %0, + half 
%1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfmul_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfmul_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f32.f32( + , + float, + i64); + +define @intrinsic_vfmul_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfmul_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfmul_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfmul_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfmul_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfmul_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfmul_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv2f64.f64( + %0, + 
%1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfmul_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfmul.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfmul_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_vf_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfmul.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfmul.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfmul_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfmul_mask_vf_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfmul.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfmul.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll new file mode 100644 index 000000000000..b08d0c4bd918 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv32.ll @@ -0,0 +1,441 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfrdiv.nxv1f16.f16( + , + half, + i32); + +define @intrinsic_vfrdiv_vf_nxv1f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv2f16.f16( + , + half, + i32); + +define @intrinsic_vfrdiv_vf_nxv2f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv2f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vfrdiv_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv4f16.f16( + , + half, + i32); + +define @intrinsic_vfrdiv_vf_nxv4f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv8f16.f16( + , + half, + i32); + +define @intrinsic_vfrdiv_vf_nxv8f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv16f16.f16( + , + half, + i32); + +define @intrinsic_vfrdiv_vf_nxv16f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv32f16.f16( + , + half, + i32); + +define @intrinsic_vfrdiv_vf_nxv32f16_f16( %0, half %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( + %0, + half %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( + , + , + half, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vfrdiv.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv1f32.f32( + , + float, + i32); + +define @intrinsic_vfrdiv_vf_nxv1f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv2f32.f32( + , + float, + i32); + +define @intrinsic_vfrdiv_vf_nxv2f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv4f32.f32( + , + float, + i32); + +define @intrinsic_vfrdiv_vf_nxv4f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv8f32.f32( + , + float, + i32); + +define @intrinsic_vfrdiv_vf_nxv8f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv16f32.f32( + , + float, + i32); + +define 
@intrinsic_vfrdiv_vf_nxv16f32_f32( %0, float %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( + %0, + float %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( + , + , + float, + , + i32); + +define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll new file mode 100644 index 000000000000..176bd9061b06 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vfrdiv-rv64.ll @@ -0,0 +1,601 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vfrdiv.nxv1f16.f16( + , + half, + i64); + +define @intrinsic_vfrdiv_vf_nxv1f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv1f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv1f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv1f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv2f16.f16( + , + half, + i64); + +define @intrinsic_vfrdiv_vf_nxv2f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv2f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv2f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv2f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv2f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv4f16.f16( + , + half, + i64); + +define @intrinsic_vfrdiv_vf_nxv4f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv4f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv4f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv4f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, 
{{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv4f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv8f16.f16( + , + half, + i64); + +define @intrinsic_vfrdiv_vf_nxv8f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv8f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv8f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv8f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv8f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv16f16.f16( + , + half, + i64); + +define @intrinsic_vfrdiv_vf_nxv16f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv16f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv16f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv16f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv16f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv32f16.f16( + , + half, + i64); + +define @intrinsic_vfrdiv_vf_nxv32f16_f16( %0, half %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv32f16.f16( + %0, + half %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv32f16.f16( + , + , + half, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv32f16_f16( %0, %1, half %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv32f16_f16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv32f16.f16( + %0, + %1, + half %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv1f32.f32( + , + float, + i64); + +define @intrinsic_vfrdiv_vf_nxv1f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv1f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv1f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv1f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv2f32.f32( + , + float, + i64); + 
+define @intrinsic_vfrdiv_vf_nxv2f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv2f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv2f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv2f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv2f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv4f32.f32( + , + float, + i64); + +define @intrinsic_vfrdiv_vf_nxv4f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv4f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv4f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv4f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv4f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv8f32.f32( + , + float, + i64); + +define @intrinsic_vfrdiv_vf_nxv8f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv8f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv8f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv8f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv8f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv16f32.f32( + , + float, + i64); + +define @intrinsic_vfrdiv_vf_nxv16f32_f32( %0, float %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv16f32.f32( + %0, + float %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv16f32.f32( + , + , + float, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv16f32_f32( %0, %1, float %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv16f32_f32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv16f32.f32( + %0, + %1, + float %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv1f64.f64( + , + double, + i64); + +define @intrinsic_vfrdiv_vf_nxv1f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv1f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv1f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv1f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv1f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv2f64.f64( + , + double, + i64); + +define @intrinsic_vfrdiv_vf_nxv2f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv2f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv2f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv2f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv2f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv2f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv4f64.f64( + , + double, + i64); + +define @intrinsic_vfrdiv_vf_nxv4f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv4f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv4f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv4f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv4f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv4f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vfrdiv.nxv8f64.f64( + , + double, + i64); + +define @intrinsic_vfrdiv_vf_nxv8f64_f64( %0, double %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_vf_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}} + %a = call @llvm.riscv.vfrdiv.nxv8f64.f64( + %0, + double %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vfrdiv.mask.nxv8f64.f64( + , + , + double, + , + i64); + +define @intrinsic_vfrdiv_mask_vf_nxv8f64_f64( %0, %1, double %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv8f64_f64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vfrdiv.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t + %a = call @llvm.riscv.vfrdiv.mask.nxv8f64.f64( + %0, + %1, + double %2, + %3, + i64 %4) + + ret %a +}