[RISCV] Define vfadd/vfsub/vfrsub intrinsics.

Define the vfadd/vfsub/vfrsub intrinsics and lower them to V instructions.

We worked with @rogfer01 from BSC to produce this patch.
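
As an illustration (a minimal sketch, not part of the patch; the function name is hypothetical): each intrinsic carries an explicit vector-length operand (i32 on RV32, i64 on RV64) and is selected to a vsetvli plus the matching V instruction, as the tests below check.

; Sketch: unmasked vv form on RV32, assuming +experimental-v and
; +experimental-zfh. The trailing i32 operand is the vector length.
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
  <vscale x 1 x half>, <vscale x 1 x half>, i32)

define <vscale x 1 x half> @demo(<vscale x 1 x half> %x,
                                 <vscale x 1 x half> %y, i32 %vl) {
  ; Expected lowering: vsetvli ..., e16,mf4,ta,mu ; vfadd.vv ...
  %r = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
         <vscale x 1 x half> %x, <vscale x 1 x half> %y, i32 %vl)
  ret <vscale x 1 x half> %r
}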

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang@sifive.com>

Differential Revision: https://reviews.llvm.org/D93291
Hsiangkai Wang 2020-12-15 00:51:07 +08:00
parent 903f295009
commit c1dac6bac5
8 changed files with 5293 additions and 13 deletions

llvm/include/llvm/IR/IntrinsicsRISCV.td

@@ -193,4 +193,7 @@ let TargetPrefix = "riscv" in {
   defm vmaxu : RISCVBinaryAAX;
   defm vmax : RISCVBinaryAAX;
+  defm vfadd : RISCVBinaryAAX;
+  defm vfsub : RISCVBinaryAAX;
+  defm vfrsub : RISCVBinaryAAX;
 } // TargetPrefix = "riscv"
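
Each defm above also yields a masked companion intrinsic. A sketch of its shape, copied from the declarations in the tests below (the first operand is the maskedoff/merge value):

; Masked companion generated for each intrinsic:
declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
  <vscale x 1 x half>,  ; maskedoff (merge)
  <vscale x 1 x half>,  ; op1
  <vscale x 1 x half>,  ; op2
  <vscale x 1 x i1>,    ; mask
  i32)                  ; vl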

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

@@ -84,18 +84,22 @@ class ToFPR32<ValueType type, DAGOperand operand, string name> {
 // Vector register and vector group type information.
 //===----------------------------------------------------------------------===//

-class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M>
+class VTypeInfo<ValueType Vec, ValueType Mas, int Sew, VReg Reg, LMULInfo M,
+                ValueType Scal = XLenVT, RegisterClass ScalarReg = GPR>
 {
   ValueType Vector = Vec;
   ValueType Mask = Mas;
   int SEW = Sew;
   VReg RegClass = Reg;
   LMULInfo LMul = M;
+  ValueType Scalar = Scal;
+  RegisterClass ScalarRegClass = ScalarReg;
 }

-class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas,
-                     int Sew, VReg Reg, LMULInfo M>
-  : VTypeInfo<Vec, Mas, Sew, Reg, M>
+class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas, int Sew,
+                     VReg Reg, LMULInfo M, ValueType Scal = XLenVT,
+                     RegisterClass ScalarReg = GPR>
+  : VTypeInfo<Vec, Mas, Sew, Reg, M, Scal, ScalarReg>
 {
   ValueType VectorM1 = VecM1;
 }
@@ -129,6 +133,42 @@ defset list<VTypeInfo> AllVectors = {
     def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
     def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
   }
+
+  defset list<VTypeInfo> AllFloatVectors = {
+    defset list<VTypeInfo> NoGroupFloatVectors = {
+      def VF16MF4: VTypeInfo<vfloat16mf4_t, vbool64_t, 16, VR, V_MF4, f16, FPR16>;
+      def VF16MF2: VTypeInfo<vfloat16mf2_t, vbool32_t, 16, VR, V_MF2, f16, FPR16>;
+      def VF16M1:  VTypeInfo<vfloat16m1_t,  vbool16_t, 16, VR, V_M1,  f16, FPR16>;
+      def VF32MF2: VTypeInfo<vfloat32mf2_t, vbool64_t, 32, VR, V_MF2, f32, FPR32>;
+      def VF32M1:  VTypeInfo<vfloat32m1_t,  vbool32_t, 32, VR, V_M1,  f32, FPR32>;
+      def VF64M1:  VTypeInfo<vfloat64m1_t,  vbool64_t, 64, VR, V_M1,  f64, FPR64>;
+    }
+
+    defset list<GroupVTypeInfo> GroupFloatVectors = {
+      def VF16M2: GroupVTypeInfo<vfloat16m2_t, vfloat16m1_t, vbool8_t,  16,
+                                 VRM2, V_M2, f16, FPR16>;
+      def VF16M4: GroupVTypeInfo<vfloat16m4_t, vfloat16m1_t, vbool4_t,  16,
+                                 VRM4, V_M4, f16, FPR16>;
+      def VF16M8: GroupVTypeInfo<vfloat16m8_t, vfloat16m1_t, vbool2_t,  16,
+                                 VRM8, V_M8, f16, FPR16>;
+      def VF32M2: GroupVTypeInfo<vfloat32m2_t, vfloat32m1_t, vbool16_t, 32,
+                                 VRM2, V_M2, f32, FPR32>;
+      def VF32M4: GroupVTypeInfo<vfloat32m4_t, vfloat32m1_t, vbool8_t,  32,
+                                 VRM4, V_M4, f32, FPR32>;
+      def VF32M8: GroupVTypeInfo<vfloat32m8_t, vfloat32m1_t, vbool4_t,  32,
+                                 VRM8, V_M8, f32, FPR32>;
+      def VF64M2: GroupVTypeInfo<vfloat64m2_t, vfloat64m1_t, vbool32_t, 64,
+                                 VRM2, V_M2, f64, FPR64>;
+      def VF64M4: GroupVTypeInfo<vfloat64m4_t, vfloat64m1_t, vbool16_t, 64,
+                                 VRM4, V_M4, f64, FPR64>;
+      def VF64M8: GroupVTypeInfo<vfloat64m8_t, vfloat64m1_t, vbool8_t,  64,
+                                 VRM8, V_M8, f64, FPR64>;
+    }
+  }
 }

 class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
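
For reference, each VTypeInfo entry ties a C-level RVV type to its LLVM scalable-vector, mask, and scalar types. Taking VF32M1 above as an example (declaration shape copied from the tests below):

; VF32M1 (vfloat32m1_t) corresponds to:
;   vector <vscale x 2 x float>, mask <vscale x 2 x i1>, scalar float
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
  <vscale x 2 x float>, <vscale x 2 x float>, i32)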
@@ -320,9 +360,10 @@ multiclass VPseudoBinaryV_VV {
     defm _VV : VPseudoBinary<m.vrclass, m.vrclass, m.vrclass, m>;
 }

-multiclass VPseudoBinaryV_VX {
+multiclass VPseudoBinaryV_VX<bit IsFloat> {
   foreach m = MxList.m in
-    defm _VX : VPseudoBinary<m.vrclass, m.vrclass, GPR, m>;
+    defm !if(!eq(IsFloat, 0), "_VX", "_VF") : VPseudoBinary<m.vrclass, m.vrclass,
+                                                !if(!eq(IsFloat, 0), GPR, FPR32), m>;
 }
multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
@@ -414,17 +455,17 @@ multiclass VPseudoBinaryV_IM<bit CarryOut = 0, bit CarryIn = 1,
 multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
   defm "" : VPseudoBinaryV_VV;
-  defm "" : VPseudoBinaryV_VX;
+  defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
   defm "" : VPseudoBinaryV_VI<ImmType>;
 }

-multiclass VPseudoBinaryV_VV_VX {
+multiclass VPseudoBinaryV_VV_VX<bit IsFloat = 0> {
   defm "" : VPseudoBinaryV_VV;
-  defm "" : VPseudoBinaryV_VX;
+  defm "" : VPseudoBinaryV_VX<IsFloat>;
 }

 multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
-  defm "" : VPseudoBinaryV_VX;
+  defm "" : VPseudoBinaryV_VX</*IsFloat=*/0>;
   defm "" : VPseudoBinaryV_VI<ImmType>;
 }
@@ -624,10 +665,11 @@ multiclass VPatBinaryV_VV<string intrinsic, string instruction,
 multiclass VPatBinaryV_VX<string intrinsic, string instruction,
                           list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
-    defm : VPatBinary<intrinsic, instruction, "VX",
-                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
+    defm : VPatBinary<intrinsic, instruction,
+                      !if(!eq(vti.Scalar, XLenVT), "VX", "VF"),
+                      vti.Vector, vti.Vector, vti.Scalar, vti.Mask,
                       vti.SEW, vti.LMul, vti.RegClass,
-                      vti.RegClass, GPR>;
+                      vti.RegClass, vti.ScalarRegClass>;
 }
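
With vti.Scalar and vti.ScalarRegClass threaded through, the same multiclass now emits "VF" patterns for float types, so the scalar operand of a float intrinsic is taken from an FPR rather than a GPR. A minimal sketch (hypothetical function name):

; Sketch: the vf form passes the scalar in a floating-point register
; and selects vfadd.vf instead of vfadd.vx.
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
  <vscale x 2 x float>, float, i32)

define <vscale x 2 x float> @demo_vf(<vscale x 2 x float> %x, float %s,
                                     i32 %vl) {
  %r = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
         <vscale x 2 x float> %x, float %s, i32 %vl)
  ret <vscale x 2 x float> %r
}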
multiclass VPatBinaryV_VI<string intrinsic, string instruction,
@@ -1018,9 +1060,22 @@ defm PseudoVMIN : VPseudoBinaryV_VV_VX;
 defm PseudoVMAXU : VPseudoBinaryV_VV_VX;
 defm PseudoVMAX : VPseudoBinaryV_VV_VX;
 } // Predicates = [HasStdExtV]

+let Predicates = [HasStdExtV, HasStdExtF] in {
+//===----------------------------------------------------------------------===//
+// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+//===----------------------------------------------------------------------===//
+defm PseudoVFADD : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
+defm PseudoVFSUB : VPseudoBinaryV_VV_VX</*IsFloat=*/1>;
+defm PseudoVFRSUB : VPseudoBinaryV_VX</*IsFloat=*/1>;
+} // Predicates = [HasStdExtV, HasStdExtF]
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//

 let Predicates = [HasStdExtV] in {
 // Whole-register vector patterns.
 defm "" : VPatBinarySDNode<add, "PseudoVADD">;
@@ -1084,3 +1139,13 @@ defm "" : VPatBinaryV_VV_VX<"int_riscv_vmaxu", "PseudoVMAXU", AllIntegerVectors>
 defm "" : VPatBinaryV_VV_VX<"int_riscv_vmax", "PseudoVMAX", AllIntegerVectors>;
 } // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, HasStdExtF] in {
+//===----------------------------------------------------------------------===//
+// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
+//===----------------------------------------------------------------------===//
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vfadd", "PseudoVFADD", AllFloatVectors>;
+defm "" : VPatBinaryV_VV_VX<"int_riscv_vfsub", "PseudoVFSUB", AllFloatVectors>;
+defm "" : VPatBinaryV_VX<"int_riscv_vfrsub", "PseudoVFRSUB", AllFloatVectors>;
+} // Predicates = [HasStdExtV, HasStdExtF]
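
Note that vfrsub uses VPseudoBinaryV_VX/VPatBinaryV_VX only: reversed subtraction exists solely in scalar-operand form in the V specification (vd[i] = f[rs1] - vs2[i]), so no vv variant is defined. A minimal sketch (hypothetical function name):

; Sketch: vfrsub has only the vf form; each lane computes %s - %v[i],
; lowering to vfrsub.vf.
declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
  <vscale x 1 x half>, half, i32)

define <vscale x 1 x half> @demo_rsub(<vscale x 1 x half> %v, half %s,
                                      i32 %vl) {
  %r = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
         <vscale x 1 x half> %v, half %s, i32 %vl)
  ret <vscale x 1 x half> %r
}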

llvm/test/CodeGen/RISCV/rvv/vfadd-rv32.ll

@@ -0,0 +1,882 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN: -mattr=+f -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
i32);
define <vscale x 1 x half> @intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f16_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 %2)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
i32);
define <vscale x 2 x half> @intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f16_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 %2)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x half> @intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f16_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
i32);
define <vscale x 4 x half> @intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f16_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 %2)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x half> @intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f16_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
i32);
define <vscale x 8 x half> @intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f16_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 %2)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x half> @intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f16_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
i32);
define <vscale x 16 x half> @intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f16_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 %2)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x half> @intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f16_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
i32);
define <vscale x 32 x half> @intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv32f16_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
i32 %2)
ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x half> @intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv32f16_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
<vscale x 32 x half> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
i32);
define <vscale x 1 x float> @intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv1f32_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f32_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
i32);
define <vscale x 2 x float> @intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv2f32_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv2f32_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
i32);
define <vscale x 4 x float> @intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv4f32_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv4f32_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
i32);
define <vscale x 8 x float> @intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv8f32_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv8f32_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
i32);
define <vscale x 16 x float> @intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vv_nxv16f32_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv16f32_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x float> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
<vscale x 1 x half>,
half,
i32);
define <vscale x 1 x half> @intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i32 %2)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x half> @intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
<vscale x 2 x half>,
half,
i32);
define <vscale x 2 x half> @intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i32 %2)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x half> @intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfadd.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
<vscale x 4 x half>,
half,
i32);
define <vscale x 4 x half> @intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i32 %2)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x half> @intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfadd.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
<vscale x 8 x half>,
half,
i32);
define <vscale x 8 x half> @intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i32 %2)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x half> @intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfadd.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
<vscale x 16 x half>,
half,
i32);
define <vscale x 16 x half> @intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i32 %2)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x half> @intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfadd.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
<vscale x 32 x half>,
half,
i32);
define <vscale x 32 x half> @intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.nxv32f16.f16(
<vscale x 32 x half> %0,
half %1,
i32 %2)
ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
half,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x half> @intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfadd.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
half %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
<vscale x 1 x float>,
float,
i32);
define <vscale x 1 x float> @intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.nxv1f32.f32(
<vscale x 1 x float> %0,
float %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfadd.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
<vscale x 2 x float>,
float,
i32);
define <vscale x 2 x float> @intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.nxv2f32.f32(
<vscale x 2 x float> %0,
float %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfadd.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
<vscale x 4 x float>,
float,
i32);
define <vscale x 4 x float> @intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.nxv4f32.f32(
<vscale x 4 x float> %0,
float %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfadd.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
<vscale x 8 x float>,
float,
i32);
define <vscale x 8 x float> @intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.nxv8f32.f32(
<vscale x 8 x float> %0,
float %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfadd.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
<vscale x 16 x float>,
float,
i32);
define <vscale x 16 x float> @intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.nxv16f32.f32(
<vscale x 16 x float> %0,
float %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
float,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfadd_mask_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfadd.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfadd.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
float %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}

llvm/test/CodeGen/RISCV/rvv/vfadd-rv64.ll: file diff suppressed because it is too large

llvm/test/CodeGen/RISCV/rvv/vfrsub-rv32.ll

@@ -0,0 +1,442 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN: -mattr=+f -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
i32);
define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i32 %2)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
i32);
define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i32 %2)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
i32);
define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i32 %2)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
i32);
define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i32 %2)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
i32);
define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i32 %2)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
<vscale x 32 x half>,
half,
i32);
define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
<vscale x 32 x half> %0,
half %1,
i32 %2)
ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
half,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
half %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
<vscale x 1 x float>,
float,
i32);
define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
<vscale x 1 x float> %0,
float %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
<vscale x 2 x float>,
float,
i32);
define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
<vscale x 2 x float> %0,
float %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
<vscale x 4 x float>,
float,
i32);
define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
<vscale x 4 x float> %0,
float %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
<vscale x 8 x float>,
float,
i32);
define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
<vscale x 8 x float> %0,
float %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
<vscale x 16 x float>,
float,
i32);
define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
<vscale x 16 x float> %0,
float %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
float,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
float %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}

llvm/test/CodeGen/RISCV/rvv/vfrsub-rv64.ll

@@ -0,0 +1,602 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN: -mattr=+d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
i64);
define <vscale x 1 x half> @intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x half> @llvm.riscv.vfrsub.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i64 %2)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x half> @intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfrsub.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
i64);
define <vscale x 2 x half> @intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x half> @llvm.riscv.vfrsub.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i64 %2)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x half> @intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfrsub.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
i64);
define <vscale x 4 x half> @intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x half> @llvm.riscv.vfrsub.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i64 %2)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x half> @intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfrsub.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
i64);
define <vscale x 8 x half> @intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x half> @llvm.riscv.vfrsub.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i64 %2)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x half> @intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfrsub.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
i64);
define <vscale x 16 x half> @intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x half> @llvm.riscv.vfrsub.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i64 %2)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x half> @intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfrsub.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
<vscale x 32 x half>,
half,
i64);
define <vscale x 32 x half> @intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 32 x half> @llvm.riscv.vfrsub.nxv32f16.f16(
<vscale x 32 x half> %0,
half %1,
i64 %2)
ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
half,
<vscale x 32 x i1>,
i64);
define <vscale x 32 x half> @intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfrsub.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
half %2,
<vscale x 32 x i1> %3,
i64 %4)
ret <vscale x 32 x half> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
<vscale x 1 x float>,
float,
i64);
define <vscale x 1 x float> @intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfrsub.nxv1f32.f32(
<vscale x 1 x float> %0,
float %1,
i64 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x float> @intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfrsub.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
<vscale x 2 x float>,
float,
i64);
define <vscale x 2 x float> @intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfrsub.nxv2f32.f32(
<vscale x 2 x float> %0,
float %1,
i64 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x float> @intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfrsub.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
<vscale x 4 x float>,
float,
i64);
define <vscale x 4 x float> @intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfrsub.nxv4f32.f32(
<vscale x 4 x float> %0,
float %1,
i64 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x float> @intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfrsub.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
<vscale x 8 x float>,
float,
i64);
define <vscale x 8 x float> @intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfrsub.nxv8f32.f32(
<vscale x 8 x float> %0,
float %1,
i64 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x float> @intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfrsub.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
<vscale x 16 x float>,
float,
i64);
define <vscale x 16 x float> @intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfrsub.nxv16f32.f32(
<vscale x 16 x float> %0,
float %1,
i64 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
float,
<vscale x 16 x i1>,
i64);
define <vscale x 16 x float> @intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfrsub.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
float %2,
<vscale x 16 x i1> %3,
i64 %4)
ret <vscale x 16 x float> %a
}
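; vfrsub.vf tests for f64 element types: SEW becomes e64, covering nxv1f64 at
; m1 through nxv8f64 at m8, with a double scalar operand.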
declare <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
<vscale x 1 x double>,
double,
i64);
define <vscale x 1 x double> @intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv1f64_nxv1f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x double> @llvm.riscv.vfrsub.nxv1f64.f64(
<vscale x 1 x double> %0,
double %1,
i64 %2)
ret <vscale x 1 x double> %a
}
declare <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
i64);
define <vscale x 1 x double> @intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv1f64_nxv1f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x double> @llvm.riscv.vfrsub.mask.nxv1f64.f64(
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
i64 %4)
ret <vscale x 1 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
<vscale x 2 x double>,
double,
i64);
define <vscale x 2 x double> @intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv2f64_nxv2f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x double> @llvm.riscv.vfrsub.nxv2f64.f64(
<vscale x 2 x double> %0,
double %1,
i64 %2)
ret <vscale x 2 x double> %a
}
declare <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
i64);
define <vscale x 2 x double> @intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv2f64_nxv2f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x double> @llvm.riscv.vfrsub.mask.nxv2f64.f64(
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
i64 %4)
ret <vscale x 2 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
<vscale x 4 x double>,
double,
i64);
define <vscale x 4 x double> @intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv4f64_nxv4f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x double> @llvm.riscv.vfrsub.nxv4f64.f64(
<vscale x 4 x double> %0,
double %1,
i64 %2)
ret <vscale x 4 x double> %a
}
declare <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
i64);
define <vscale x 4 x double> @intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv4f64_nxv4f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x double> @llvm.riscv.vfrsub.mask.nxv4f64.f64(
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
i64 %4)
ret <vscale x 4 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
<vscale x 8 x double>,
double,
i64);
define <vscale x 8 x double> @intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_vf_nxv8f64_nxv8f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x double> @llvm.riscv.vfrsub.nxv8f64.f64(
<vscale x 8 x double> %0,
double %1,
i64 %2)
ret <vscale x 8 x double> %a
}
declare <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
<vscale x 8 x double>,
<vscale x 8 x double>,
double,
<vscale x 8 x i1>,
i64);
define <vscale x 8 x double> @intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfrsub_mask_vf_nxv8f64_nxv8f64_f64
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
; CHECK: vfrsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x double> @llvm.riscv.vfrsub.mask.nxv8f64.f64(
<vscale x 8 x double> %0,
<vscale x 8 x double> %1,
double %2,
<vscale x 8 x i1> %3,
i64 %4)
ret <vscale x 8 x double> %a
}

View File

@ -0,0 +1,882 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -mattr=+experimental-zfh \
; RUN: -mattr=+f -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
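; Each intrinsic below is exercised twice per (element type, LMUL) pair: an
; unmasked form and a .mask form that additionally takes a merge (maskedoff)
; operand and a <vscale x N x i1> mask. The mask lives in v0, hence the v0.t
; suffix in the masked CHECK lines. The trailing i32 operand is the requested
; vector length; it is i32 here because XLEN is 32 on riscv32.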
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
i32);
define <vscale x 1 x half> @intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f16_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
i32 %2)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x half> @intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f16_nxv1f16_nxv1f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.nxv1f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
i32);
define <vscale x 2 x half> @intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f16_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
i32 %2)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x half> @intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f16_nxv2f16_nxv2f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.nxv2f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
i32);
define <vscale x 4 x half> @intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f16_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
i32 %2)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x half> @intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f16_nxv4f16_nxv4f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.nxv4f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
i32);
define <vscale x 8 x half> @intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f16_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
i32 %2)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x half> @intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f16_nxv8f16_nxv8f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.nxv8f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
i32);
define <vscale x 16 x half> @intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f16_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
i32 %2)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x half> @intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f16_nxv16f16_nxv16f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.nxv16f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
i32);
define <vscale x 32 x half> @intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv32f16_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
i32 %2)
ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x half>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x half> @intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, <vscale x 32 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv32f16_nxv32f16_nxv32f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.nxv32f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
<vscale x 32 x half> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x half> %a
}
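; vfsub.vv tests for f32 element types: the same structure with SEW=32, from
; nxv1f32 at mf2 up to nxv16f32 at m8.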
declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
i32);
define <vscale x 1 x float> @intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv1f32_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv1f32_nxv1f32_nxv1f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
i32);
define <vscale x 2 x float> @intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv2f32_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv2f32_nxv2f32_nxv2f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.nxv2f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
i32);
define <vscale x 4 x float> @intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv4f32_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv4f32_nxv4f32_nxv4f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.nxv4f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
i32);
define <vscale x 8 x float> @intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv8f32_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv8f32_nxv8f32_nxv8f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.nxv8f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
i32);
define <vscale x 16 x float> @intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vv_nxv16f32_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x float>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, <vscale x 16 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vv_nxv16f32_nxv16f32_nxv16f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.nxv16f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
<vscale x 16 x float> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}
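; Vector-scalar (vfsub.vf) tests follow. vfsub.vf computes vs2[i] - rs1, with
; the scalar passed in a floating-point register (the ft[0-9]+ pattern in the
; CHECK lines); contrast vfrsub.vf above, which computes rs1 - vs2[i].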
declare <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
i32);
define <vscale x 1 x half> @intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.nxv1f16.f16(
<vscale x 1 x half> %0,
half %1,
i32 %2)
ret <vscale x 1 x half> %a
}
declare <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x half> @intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f16_nxv1f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x half> @llvm.riscv.vfsub.mask.nxv1f16.f16(
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
i32);
define <vscale x 2 x half> @intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.nxv2f16.f16(
<vscale x 2 x half> %0,
half %1,
i32 %2)
ret <vscale x 2 x half> %a
}
declare <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x half> @intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f16_nxv2f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x half> @llvm.riscv.vfsub.mask.nxv2f16.f16(
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
i32);
define <vscale x 4 x half> @intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.nxv4f16.f16(
<vscale x 4 x half> %0,
half %1,
i32 %2)
ret <vscale x 4 x half> %a
}
declare <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x half> @intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f16_nxv4f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x half> @llvm.riscv.vfsub.mask.nxv4f16.f16(
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
i32);
define <vscale x 8 x half> @intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.nxv8f16.f16(
<vscale x 8 x half> %0,
half %1,
i32 %2)
ret <vscale x 8 x half> %a
}
declare <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x half> @intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f16_nxv8f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x half> @llvm.riscv.vfsub.mask.nxv8f16.f16(
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
i32);
define <vscale x 16 x half> @intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.nxv16f16.f16(
<vscale x 16 x half> %0,
half %1,
i32 %2)
ret <vscale x 16 x half> %a
}
declare <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x half> @intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f16_nxv16f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x half> @llvm.riscv.vfsub.mask.nxv16f16.f16(
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
<vscale x 32 x half>,
half,
i32);
define <vscale x 32 x half> @intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.nxv32f16.f16(
<vscale x 32 x half> %0,
half %1,
i32 %2)
ret <vscale x 32 x half> %a
}
declare <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
<vscale x 32 x half>,
<vscale x 32 x half>,
half,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x half> @intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv32f16_nxv32f16_f16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 32 x half> @llvm.riscv.vfsub.mask.nxv32f16.f16(
<vscale x 32 x half> %0,
<vscale x 32 x half> %1,
half %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x half> %a
}
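; vfsub.vf tests for f32 element types: same structure with SEW=32 and a float
; scalar operand.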
declare <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
<vscale x 1 x float>,
float,
i32);
define <vscale x 1 x float> @intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.nxv1f32.f32(
<vscale x 1 x float> %0,
float %1,
i32 %2)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x float> @intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv1f32_nxv1f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 1 x float> @llvm.riscv.vfsub.mask.nxv1f32.f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
<vscale x 2 x float>,
float,
i32);
define <vscale x 2 x float> @intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.nxv2f32.f32(
<vscale x 2 x float> %0,
float %1,
i32 %2)
ret <vscale x 2 x float> %a
}
declare <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x float> @intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv2f32_nxv2f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 2 x float> @llvm.riscv.vfsub.mask.nxv2f32.f32(
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
<vscale x 4 x float>,
float,
i32);
define <vscale x 4 x float> @intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.nxv4f32.f32(
<vscale x 4 x float> %0,
float %1,
i32 %2)
ret <vscale x 4 x float> %a
}
declare <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x float> @intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv4f32_nxv4f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 4 x float> @llvm.riscv.vfsub.mask.nxv4f32.f32(
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
<vscale x 8 x float>,
float,
i32);
define <vscale x 8 x float> @intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.nxv8f32.f32(
<vscale x 8 x float> %0,
float %1,
i32 %2)
ret <vscale x 8 x float> %a
}
declare <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x float> @intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv8f32_nxv8f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 8 x float> @llvm.riscv.vfsub.mask.nxv8f32.f32(
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
<vscale x 16 x float>,
float,
i32);
define <vscale x 16 x float> @intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.nxv16f32.f32(
<vscale x 16 x float> %0,
float %1,
i32 %2)
ret <vscale x 16 x float> %a
}
declare <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
<vscale x 16 x float>,
<vscale x 16 x float>,
float,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x float> @intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfsub_mask_vf_nxv16f32_nxv16f32_f32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
; CHECK: vfsub.vf {{v[0-9]+}}, {{v[0-9]+}}, {{ft[0-9]+}}, v0.t
%a = call <vscale x 16 x float> @llvm.riscv.vfsub.mask.nxv16f32.f32(
<vscale x 16 x float> %0,
<vscale x 16 x float> %1,
float %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x float> %a
}

File diff suppressed because it is too large