[RISCV] Add the policy operand for nomask vector Multiply-Add IR intrinsics.

The goal is to support the tail and mask policies in RVV builtins.
We focus on the IR part first.

The nomask vector Multiply-Add intrinsics need a policy operand because their
merge value (the accumulator) can never be undef, so the tail policy cannot be
inferred from a passthru operand.
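
For example (a sketch distilled from the updated vfmacc tests below), a nomask
multiply-add intrinsic now carries a trailing i64 policy operand, where 0
selects tail undisturbed:

  ; before
  %0 = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(
         <vscale x 1 x float> %acc, <vscale x 1 x float> %op1,
         <vscale x 1 x float> %op2, i64 %vl)
  ; after
  %0 = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(
         <vscale x 1 x float> %acc, <vscale x 1 x float> %op1,
         <vscale x 1 x float> %op2, i64 %vl, i64 0)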

Reviewed By: monkchiang

Differential Revision: https://reviews.llvm.org/D119727
Author: Zakk Chen
Date: 2022-02-15 23:20:51 -08:00
Commit: ca78312407 (parent: fc1b21228e)
69 changed files with 3691 additions and 2254 deletions


@@ -130,6 +130,14 @@
 //
 // There are a number of attributes that are used to constraint the number and
 // shape of the builtins generated. Refer to the comments below for them.
+class Policy<int val>{
+  int Value = val;
+}
+def NonePolicy : Policy<0>;
+def HasPassthruOperand : Policy<1>;
+def HasPolicyOperand : Policy<2>;
 class RVVBuiltin<string suffix, string prototype, string type_range,
                  string mangled_suffix = ""> {
   // Base name that will be prepended in __builtin_rvv_ and appended the
@@ -177,8 +185,12 @@ class RVVBuiltin<string suffix, string prototype, string type_range,
   // The policy argument is located at the last position.
   bit HasPolicy = true;
-  // The nomask intrinsic IR have the passthru operand.
-  bit HasNoMaskPassThru = false;
+  // The policy scheme for nomask intrinsic IR.
+  // HasPassthruOperand: Has a passthru operand to decide tail policy. If it is
+  // undef, tail policy is tail agnostic, otherwise policy is tail undisturbed.
+  // HasPolicyOperand: Has a policy operand. 1 is tail agnostic and 0 is tail
+  // undisturbed.
+  Policy NoMaskPolicy = NonePolicy;
   // This builtin supports non-masked function overloading api.
   // All masked operations support overloading api.
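
In IR terms, the two schemes above distinguish calls like the following (a
sketch for illustration only, not part of this diff; the vadd form assumes the
passthru operand added to nomask binary intrinsics by an earlier patch, and
the operand names are placeholders):

  ; HasPassthruOperand: tail policy is inferred from the passthru operand.
  ; An undef passthru means tail agnostic; a real vector, tail undisturbed.
  %sum = call <vscale x 1 x i32> @llvm.riscv.vadd.nxv1i32.nxv1i32.i64(
           <vscale x 1 x i32> undef, <vscale x 1 x i32> %a,
           <vscale x 1 x i32> %b, i64 %vl)

  ; HasPolicyOperand: the merge value (the accumulator) is never undef, so the
  ; tail policy is an explicit trailing operand: 1 is tail agnostic, 0 is tail
  ; undisturbed.
  %mac = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(
           <vscale x 1 x i32> %acc, <vscale x 1 x i32> %a,
           <vscale x 1 x i32> %b, i64 %vl, i64 0)
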
@@ -1586,7 +1598,7 @@ defm : RVVIndexedSegStore<"vsoxseg">;
 // 12. Vector Integer Arithmetic Instructions
 // 12.1. Vector Single-Width Integer Add and Subtract
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vadd : RVVIntBinBuiltinSet;
 defm vsub : RVVIntBinBuiltinSet;
 defm vrsub : RVVOutOp1BuiltinSet<"vrsub", "csil",
@@ -1597,7 +1609,7 @@ defm vneg_v : RVVPseudoUnaryBuiltin<"vrsub", "csil">;
 // 12.2. Vector Widening Integer Add/Subtract
 // Widening unsigned integer add/subtract, 2*SEW = SEW +/- SEW
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vwaddu : RVVUnsignedWidenBinBuiltinSet;
 defm vwsubu : RVVUnsignedWidenBinBuiltinSet;
 // Widening signed integer add/subtract, 2*SEW = SEW +/- SEW
@@ -1616,7 +1628,7 @@ defm vwcvt_x_x_v : RVVPseudoVWCVTBuiltin<"vwadd", "vwcvt_x", "csi",
                                          [["w", "wv"]]>;
 // 12.3. Vector Integer Extension
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
 def vsext_vf2 : RVVIntExt<"vsext", "w", "wv", "csi">;
 def vzext_vf2 : RVVIntExt<"vzext", "Uw", "UwUv", "csi">;
@@ -1633,7 +1645,7 @@ let Log2LMUL = [-3, -2, -1, 0] in {
 // 12.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
 let HasMask = false, HasPolicy = false in {
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vadc : RVVCarryinBuiltinSet;
 defm vsbc : RVVCarryinBuiltinSet;
 }
@@ -1644,7 +1656,7 @@ let HasMask = false, HasPolicy = false in {
 }
 // 12.5. Vector Bitwise Logical Instructions
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vand : RVVIntBinBuiltinSet;
 defm vxor : RVVIntBinBuiltinSet;
 defm vor : RVVIntBinBuiltinSet;
@@ -1652,7 +1664,7 @@ defm vor : RVVIntBinBuiltinSet;
 defm vnot_v : RVVPseudoVNotBuiltin<"vxor", "csil">;
 // 12.6. Vector Single-Width Bit Shift Instructions
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vsll : RVVShiftBuiltinSet;
 defm vsrl : RVVUnsignedShiftBuiltinSet;
 defm vsra : RVVSignedShiftBuiltinSet;
@@ -1680,7 +1692,7 @@ defm vmsge : RVVSignedMaskOutBuiltinSet;
 }
 // 12.9. Vector Integer Min/Max Instructions
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vminu : RVVUnsignedBinBuiltinSet;
 defm vmin : RVVSignedBinBuiltinSet;
 defm vmaxu : RVVUnsignedBinBuiltinSet;
@@ -1704,7 +1716,7 @@ defm vrem : RVVSignedBinBuiltinSet;
 }
 // 12.12. Vector Widening Integer Multiply Instructions
-let Log2LMUL = [-3, -2, -1, 0, 1, 2], HasNoMaskPassThru = true in {
+let Log2LMUL = [-3, -2, -1, 0, 1, 2], NoMaskPolicy = HasPassthruOperand in {
 defm vwmul : RVVOutOp0Op1BuiltinSet<"vwmul", "csi",
                                     [["vv", "w", "wvv"],
                                      ["vx", "w", "wve"]]>;
@@ -1717,6 +1729,7 @@ defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
 }
 // 12.13. Vector Single-Width Integer Multiply-Add Instructions
+let NoMaskPolicy = HasPolicyOperand in {
 defm vmacc : RVVIntTerBuiltinSet;
 defm vnmsac : RVVIntTerBuiltinSet;
 defm vmadd : RVVIntTerBuiltinSet;
@@ -1737,6 +1750,7 @@ defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
 defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
                                        [["vx", "w", "wwUev"]]>;
 }
+}
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
@@ -1755,7 +1769,7 @@ let HasMask = false, HasPolicy = false,
 }
 // 12.16. Vector Integer Move Instructions
-let HasMask = false, HasNoMaskPassThru = true, HasPolicy = false in {
+let HasMask = false, NoMaskPolicy = HasPassthruOperand, HasPolicy = false in {
 let MangledName = "vmv_v" in {
 defm vmv_v : RVVOutBuiltinSet<"vmv_v_v", "csil",
                               [["v", "Uv", "UvUv"]]>;
@@ -1770,7 +1784,7 @@ let HasMask = false, HasNoMaskPassThru = true, HasPolicy = false in {
 // 13. Vector Fixed-Point Arithmetic Instructions
 // 13.1. Vector Single-Width Saturating Add and Subtract
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vsaddu : RVVUnsignedBinBuiltinSet;
 defm vsadd : RVVSignedBinBuiltinSet;
 defm vssubu : RVVUnsignedBinBuiltinSet;
@@ -1823,6 +1837,7 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in {
 }
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
+let NoMaskPolicy = HasPolicyOperand in {
 defm vfmacc : RVVFloatingTerBuiltinSet;
 defm vfnmacc : RVVFloatingTerBuiltinSet;
 defm vfmsac : RVVFloatingTerBuiltinSet;
@@ -1837,9 +1852,10 @@ defm vfwmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwmsac : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
+}
 // 14.8. Vector Floating-Point Square-Root Instruction
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
 // 14.9. Vector Floating-Point Reciprocal Square-Root Estimate Instruction
@@ -1871,7 +1887,7 @@ defm vmfge : RVVFloatingMaskOutBuiltinSet;
 }
 // 14.14. Vector Floating-Point Classify Instruction
-let Name = "vfclass_v", HasNoMaskPassThru = true in
+let Name = "vfclass_v", NoMaskPolicy = HasPassthruOperand in
 def vfclass : RVVOp0Builtin<"Uv", "Uvv", "xfd">;
 // 14.15. Vector Floating-Point Merge Instruction
@@ -1890,13 +1906,13 @@ let HasMask = false, HasPolicy = false,
 }
 // 14.16. Vector Floating-Point Move Instruction
-let HasMask = false, HasNoMaskPassThru = true, HasNoMaskedOverloaded = false,
-    HasPolicy = false in
+let HasMask = false, NoMaskPolicy = HasPassthruOperand,
+    HasNoMaskedOverloaded = false, HasPolicy = false in
 defm vfmv_v : RVVOutBuiltinSet<"vfmv_v_f", "xfd",
                                [["f", "v", "ve"]]>;
 // 14.17. Single-Width Floating-Point/Integer Type-Convert Instructions
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 def vfcvt_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_xu">;
 def vfcvt_x_f_v : RVVConvToSignedBuiltin<"vfcvt_x">;
 def vfcvt_rtz_xu_f_v : RVVConvToUnsignedBuiltin<"vfcvt_rtz_xu">;
@@ -1992,7 +2008,7 @@ def vmsif : RVVMaskUnaryBuiltin;
 // 16.6. vmsof.m set-only-first mask bit
 def vmsof : RVVMaskUnaryBuiltin;
-let HasNoMaskPassThru = true, HasNoMaskedOverloaded = false in {
+let NoMaskPolicy = HasPassthruOperand, HasNoMaskedOverloaded = false in {
 // 16.8. Vector Iota Instruction
 defm viota : RVVOutBuiltinSet<"viota", "csil", [["m", "Uv", "Uvm"]]>;
@@ -2033,7 +2049,7 @@ defm vslideup : RVVSlideBuiltinSet;
 defm vslidedown : RVVSlideBuiltinSet;
 // 17.3.3. Vector Slide1up Instructions
-let HasNoMaskPassThru = true in {
+let NoMaskPolicy = HasPassthruOperand in {
 defm vslide1up : RVVSlideOneBuiltinSet;
 defm vfslide1up : RVVFloatingBinVFBuiltinSet;


@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@@ -47,7 +47,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@@ -57,7 +57,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@@ -77,7 +77,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@@ -87,7 +87,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -97,7 +97,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@@ -107,7 +107,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -117,7 +117,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -127,7 +127,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -137,7 +137,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -147,7 +147,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
@@ -167,7 +167,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,


@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@@ -47,7 +47,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@@ -57,7 +57,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@@ -77,7 +77,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@@ -87,7 +87,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -97,7 +97,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@@ -107,7 +107,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -117,7 +117,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -127,7 +127,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -137,7 +137,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -147,7 +147,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
@@ -167,7 +167,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,


@@ -7,7 +7,7 @@
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
@@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@@ -37,7 +37,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@@ -47,7 +47,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@@ -57,7 +57,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@@ -67,7 +67,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@@ -77,7 +77,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@@ -87,7 +87,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@@ -97,7 +97,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@@ -107,7 +107,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@@ -117,7 +117,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
@@ -127,7 +127,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@@ -137,7 +137,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
@@ -147,7 +147,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@@ -157,7 +157,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
@@ -167,7 +167,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@@ -177,7 +177,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -27,7 +27,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@ -47,7 +47,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@ -67,7 +67,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@ -87,7 +87,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -97,7 +97,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@ -107,7 +107,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -117,7 +117,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
@ -127,7 +127,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -137,7 +137,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
@ -147,7 +147,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -157,7 +157,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
@ -167,7 +167,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -177,7 +177,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
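
The same two-line change repeats for every element type and LMUL in this file: each nomask vfmsub test now expects one extra trailing i64 operand (i64 0 here) on the IR call. As a rough illustration of the C source these checks verify — a minimal sketch with a hypothetical demo_ function name, using the unprefixed riscv_vector.h intrinsic naming of this era, not the verbatim test file:

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: a nomask vector-vector multiply-sub. After this change the
// builtin is expected to lower to
//   @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(acc, op1, op2, vl, i64 0)
// i.e. the same call as before plus the new trailing policy operand.
vfloat32m1_t demo_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
                                  vfloat32m1_t op2, size_t vl) {
  return vfmsub_vv_f32m1(acc, op1, op2, vl);
}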


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -27,7 +27,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
@ -47,7 +47,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
@ -67,7 +67,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
@ -87,7 +87,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -97,7 +97,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
@ -107,7 +107,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -117,7 +117,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
@ -127,7 +127,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -137,7 +137,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
@ -147,7 +147,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -157,7 +157,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
@ -167,7 +167,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -177,7 +177,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -27,7 +27,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
@ -47,7 +47,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
@ -67,7 +67,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
@ -87,7 +87,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -97,7 +97,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
@ -107,7 +107,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -117,7 +117,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
@ -127,7 +127,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -137,7 +137,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
@ -147,7 +147,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -157,7 +157,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
@ -167,7 +167,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -177,7 +177,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -27,7 +27,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
@ -47,7 +47,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
@ -67,7 +67,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
@ -87,7 +87,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -97,7 +97,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
@ -107,7 +107,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -117,7 +117,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
@ -127,7 +127,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -137,7 +137,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
@ -147,7 +147,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -157,7 +157,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
@ -167,7 +167,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -177,7 +177,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
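
The vector-scalar (_vf) form gets the identical treatment; the only difference from the _vv sketch earlier is the scalar multiplicand, which shows up as a plain scalar type (f32, f64) in the mangled intrinsic name. A minimal sketch under the same assumptions (hypothetical demo_ name, era-appropriate unprefixed intrinsics):

#include <stddef.h>
#include <riscv_vector.h>

// Sketch: nomask vector-scalar negated multiply-sub-accumulate. The
// expected IR call becomes
//   @llvm.riscv.vfnmsac.nxv2f32.f32.i64(acc, op1, op2, vl, i64 0)
// -- note the scalar f32 in the name and the new trailing i64 0.
vfloat32m1_t demo_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
                                   vfloat32m1_t op2, size_t vl) {
  return vfnmsac_vf_f32m1(acc, op1, op2, vl);
}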


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -27,7 +27,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
@ -47,7 +47,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
@ -67,7 +67,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
@ -87,7 +87,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -97,7 +97,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
@ -107,7 +107,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -117,7 +117,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
@ -127,7 +127,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -137,7 +137,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
@ -147,7 +147,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -157,7 +157,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
@ -167,7 +167,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -177,7 +177,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
@ -27,7 +27,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
@ -47,7 +47,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
@ -67,7 +67,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
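
The widening vfwmacc checks change identically: the f64 accumulator and f32 sources keep their positions, and only the policy immediate is appended after vl. A hedged usage sketch (builtin name inferred from the test name; the body is elided in this view):

#include <riscv_vector.h>
#include <stddef.h>

// Widening FMA: the 2*SEW accumulator is also the merge value, so the
// lowered call now carries an explicit tail policy of 0 after vl.
vfloat64m1_t sketch_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
                                     vfloat32mf2_t op2, size_t vl) {
  return vfwmacc_vv_f64m1(acc, op1, op2, vl);
}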


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
@ -27,7 +27,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
@ -47,7 +47,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
@ -67,7 +67,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
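
The two trailing i64 operands in the new vfwmsac check lines are easy to misread as duplicates; the first is vl and the second is the policy. A sketch of the correspondence (operand roles annotated, names illustrative):

#include <riscv_vector.h>
#include <stddef.h>

// Lowered shape: @llvm.riscv.vfwmsac.<types>(acc, op1, op2, vl, policy)
//   acc    -> accumulator and merge operand
//   vl     -> i64 [[VL]]
//   policy -> i64 0, i.e. tail undisturbed, for the plain nomask builtin
vfloat64m2_t sketch_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
                                     vfloat32m1_t op2, size_t vl) {
  return vfwmsac_vf_f64m2(acc, op1, op2, vl);
}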


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
@ -27,7 +27,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
@ -47,7 +47,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
@ -67,7 +67,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
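
These vfwnmacc hunks also show why a policy operand is needed instead of an undef passthru: the accumulator is both an input and the merge value, so it can never be undef, and undef-ness therefore cannot encode the tail policy. A hedged sketch:

#include <riscv_vector.h>
#include <stddef.h>

// acc doubles as the passthru, so the tail policy must be a separate
// operand; the checks above expect an explicit ", i64 0" after vl.
vfloat64m4_t sketch_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
                                      vfloat32m2_t op2, size_t vl) {
  return vfwnmacc_vv_f64m4(acc, op1, op2, vl);
}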


@ -7,7 +7,7 @@
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -17,7 +17,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
@ -27,7 +27,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -37,7 +37,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
@ -47,7 +47,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -57,7 +57,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
@ -67,7 +67,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -77,7 +77,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
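
With vfwnmsac, all four widening FP multiply-add families now share the same IR signature shape. Spelled out as the checks imply for the m1 variant (the declaration itself is not part of this diff):

#include <riscv_vector.h>
#include <stddef.h>

// Implied by the CHECK lines:
// declare <vscale x 1 x double>
//   @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(
//     <vscale x 1 x double>, <vscale x 1 x float>, <vscale x 1 x float>,
//     i64, i64)  ; trailing operands: vl, policy
vfloat64m1_t sketch_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
                                      vfloat32mf2_t op2, size_t vl) {
  return vfwnmsac_vv_f64m1(acc, op1, op2, vl);
}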


@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
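
Every hunk in this file makes the same mechanical change: the call to the nomask vmacc IR intrinsic gains a trailing i64 policy operand. As a minimal standalone sketch (not part of the generated tests; the function name @example and the choice of the nxv2i8 variant from the first hunk are illustrative only), the updated IR form is:

; Declaration after this patch: the final i64 is the new policy operand.
declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(
    <vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i8>, i64, i64)

define <vscale x 2 x i8> @example(<vscale x 2 x i8> %acc, <vscale x 2 x i8> %op1,
                                  <vscale x 2 x i8> %op2, i64 %vl) {
entry:
  ; %acc is both a source operand and the destination; the checks above show
  ; the frontend passing i64 0 as the policy for the nomask form.
  %r = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(
      <vscale x 2 x i8> %acc, <vscale x 2 x i8> %op1, <vscale x 2 x i8> %op2,
      i64 %vl, i64 0)
  ret <vscale x 2 x i8> %r
}

The vmadd hunks below follow the identical pattern.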


@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
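The checks above all follow one pattern: each nomask vmadd call keeps its original operands and gains a trailing i64 0 policy operand after the VL operand. As a minimal sketch (not part of this diff; the function name @sketch and the value names are made up for illustration), the updated IR for one element type looks like:

  define <vscale x 1 x i64> @sketch(<vscale x 1 x i64> %acc, <vscale x 1 x i64> %op1,
                                    <vscale x 1 x i64> %op2, i64 %vl) {
  entry:
    ; Same intrinsic as in the checks above, with the policy operand appended last.
    %r = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(
             <vscale x 1 x i64> %acc, <vscale x 1 x i64> %op1,
             <vscale x 1 x i64> %op2, i64 %vl, i64 0)
    ret <vscale x 1 x i64> %r
  }

  declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(
      <vscale x 1 x i64>, <vscale x 1 x i64>, <vscale x 1 x i64>, i64, i64)

The vnmsac checks in the next file receive the identical treatment.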


@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
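
Every hunk in this file (and in the vnmsub file below) applies the same mechanical update: the nomask multiply-add call gains a trailing i64 policy operand after vl, and the regenerated tests pass 0 for it so the existing tail-undisturbed behavior (result merged into acc) is preserved. A minimal sketch of the shape such a test takes — example_vnmsac_vv_i32m1 and its CHECK lines are illustrative only, not part of this patch, and assume the same RUN lines as the surrounding file:

// CHECK-RV64-LABEL: @example_vnmsac_vv_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
#include <riscv_vector.h>

vint32m1_t example_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1,
                                   vint32m1_t op2, size_t vl) {
  // vd[i] = -(op1[i] * op2[i]) + vd[i]; the builtin lowers to the IR
  // intrinsic with the policy operand (i64 0) appended after vl.
  return vnmsac_vv_i32m1(acc, op1, op2, vl);
}

The only CHECK-visible difference from the pre-patch output is the extra `, i64 0)` at the end of the call, which is why the diff touches exactly one line per test.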

View File

@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
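
The vnmsub hunks above and the vwmacc hunks below follow the same mechanical update: each nomask multiply-add call in the expected IR gains a trailing i64 policy operand, pinned to 0 (tail undisturbed) in the regenerated CHECK lines. A minimal sketch of the resulting test shape, using the i8m1 variant purely for illustration and assuming the non-overloaded vnmsub_vv_i8m1 builtin name (the diff truncates the real function bodies):

#include <riscv_vector.h>

// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // acc is a real merge value, so the emitted intrinsic carries an explicit
  // policy operand rather than relying on an undef passthru.
  return vnmsub_vv_i8m1(acc, op1, op2, vl);
}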


@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
@ -16,7 +16,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
@ -26,7 +26,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
@ -36,7 +36,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
@ -46,7 +46,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
@ -56,7 +56,7 @@ vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
@ -66,7 +66,7 @@ vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
@ -76,7 +76,7 @@ vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
@ -86,7 +86,7 @@ vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
@ -96,7 +96,7 @@ vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
@ -106,7 +106,7 @@ vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
@ -116,7 +116,7 @@ vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
@ -126,7 +126,7 @@ vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
@ -136,7 +136,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
@@ -146,7 +146,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
@@ -156,7 +156,7 @@ vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
@@ -166,7 +166,7 @@ vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
@@ -176,7 +176,7 @@ vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
@@ -186,7 +186,7 @@ vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
@@ -196,7 +196,7 @@ vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
@@ -206,7 +206,7 @@ vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
@@ -216,7 +216,7 @@ vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
@@ -226,7 +226,7 @@ vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
@@ -236,7 +236,7 @@ vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
@@ -246,7 +246,7 @@ vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
@@ -256,7 +256,7 @@ vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
@@ -266,7 +266,7 @@ vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
@@ -276,7 +276,7 @@ vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
@@ -286,7 +286,7 @@ vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
@@ -296,7 +296,7 @@ vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
@@ -306,7 +306,7 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
@@ -316,7 +316,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
@@ -326,7 +326,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
@@ -336,7 +336,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
@@ -346,7 +346,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
@@ -356,7 +356,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
@@ -366,7 +366,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
@@ -376,7 +376,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
@@ -386,7 +386,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
@@ -396,7 +396,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
@@ -406,7 +406,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
@@ -416,7 +416,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
@@ -426,7 +426,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
@@ -436,7 +436,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
@@ -446,7 +446,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
@@ -456,7 +456,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
@@ -466,7 +466,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
@@ -476,7 +476,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
@@ -486,7 +486,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
@@ -496,7 +496,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
@@ -506,7 +506,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
@@ -516,7 +516,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
@@ -526,7 +526,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
@@ -536,7 +536,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
@@ -546,7 +546,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
@@ -556,7 +556,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
@@ -566,7 +566,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
@@ -576,7 +576,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
@@ -586,7 +586,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
@@ -596,7 +596,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
@@ -606,7 +606,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
@@ -616,7 +616,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
@@ -626,7 +626,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
@@ -636,7 +636,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
@@ -646,7 +646,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
@@ -656,7 +656,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
@@ -666,7 +666,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
@@ -676,7 +676,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
@@ -686,7 +686,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
@@ -696,7 +696,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
@@ -706,7 +706,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
@@ -716,7 +716,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
@@ -726,7 +726,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
@@ -736,7 +736,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
@@ -746,7 +746,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
@@ -756,7 +756,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
@@ -766,7 +766,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
@@ -776,7 +776,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
@@ -786,7 +786,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
@@ -796,7 +796,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
@@ -806,7 +806,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
@@ -816,7 +816,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
@@ -826,7 +826,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
@@ -836,7 +836,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
@@ -846,7 +846,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
@@ -856,7 +856,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
@@ -866,7 +866,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
@@ -876,7 +876,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
@@ -886,7 +886,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
@@ -896,7 +896,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
@@ -906,7 +906,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
@@ -916,7 +916,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
@@ -926,7 +926,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
@@ -936,7 +936,7 @@ vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
@ -946,7 +946,7 @@ vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
@ -956,7 +956,7 @@ vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
@ -966,7 +966,7 @@ vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
@ -976,7 +976,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
@ -986,7 +986,7 @@ vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
@ -996,7 +996,7 @@ vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
@ -1006,7 +1006,7 @@ vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
@ -1016,7 +1016,7 @@ vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
@ -1026,7 +1026,7 @@ vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
@ -1036,7 +1036,7 @@ vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
@ -1046,7 +1046,7 @@ vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,

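The function bodies are truncated in the hunk context above. For reference, a minimal sketch of what one of these nomask tests looks like, assuming the builtin name follows these files' convention (the test name without the test_ prefix, in the pre-v1.0 intrinsic naming). The C source is unchanged by this patch; only the generated IR call gains the trailing policy operand, i64 0 (tail undisturbed, since the accumulator serves as the merge value):

vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
                                  size_t vl) {
  // Lowers to @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(acc, op1, op2, vl, 0)
  return vwmaccus_vx_i64m8(acc, op1, op2, vl);
}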
View File

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
@ -377,7 +377,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -395,7 +395,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -413,7 +413,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -422,7 +422,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -431,7 +431,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -440,7 +440,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -449,7 +449,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -458,7 +458,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -467,7 +467,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -476,7 +476,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

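Note the jump from the -178 hunk to the -377 hunk above: the masked (_m) tests in between are untouched, because this patch adds the policy operand only to the nomask multiply-add intrinsics. For contrast, a sketch of the masked form named in that hunk header, with the body assumed from the same test naming convention:

vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
                                    double op1, vfloat64m8_t op2, size_t vl) {
  // The masked IR intrinsic does not gain a trailing policy operand here.
  return vfmacc_vf_f64m8_m(mask, acc, op1, op2, vl);
}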
View File

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
@ -377,7 +377,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -395,7 +395,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -413,7 +413,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -422,7 +422,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -431,7 +431,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -440,7 +440,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -449,7 +449,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -458,7 +458,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -467,7 +467,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -476,7 +476,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

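vfmadd differs from vfmacc only in which operand is multiplied by the accumulator (per the RVV spec, vfmacc computes acc + op1 * op2, while vfmadd computes acc * op1 + op2), so the IR change is identical: the nomask llvm.riscv.vfmadd call gains the same trailing i64 0. A sketch, again assuming the body follows the naming convention of these tests:

vfloat32mf2_t test_vfmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
                                    vfloat32mf2_t op2, size_t vl) {
  // Lowers to @llvm.riscv.vfmadd.nxv1f32.nxv1f32.i64(acc, op1, op2, vl, 0)
  return vfmadd_vv_f32mf2(acc, op1, op2, vl);
}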
View File

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
@ -377,7 +377,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -395,7 +395,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -413,7 +413,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -422,7 +422,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -431,7 +431,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -440,7 +440,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -449,7 +449,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -458,7 +458,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -467,7 +467,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -476,7 +476,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

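For reference, a minimal sketch of the nomask multiply-add pattern exercised above, assuming <riscv_vector.h> and the non-prefixed builtin spelling these tests use (the wrapper name is hypothetical): the accumulator doubles as the merge value, so the lowered IR call now ends with a policy operand of i64 0, i.e. tail undisturbed.

#include <riscv_vector.h>

// vfmsac.vv computes vd[i] = (vs1[i] * vs2[i]) - vd[i] for i < vl.
// Because vd is both the accumulator and the merge value, it cannot be
// undef, so the lowered call to @llvm.riscv.vfmsac.* carries a trailing
// policy operand; the checks above expect i64 0 (tail undisturbed).
vfloat32m1_t msac_sketch(vfloat32m1_t vd, vfloat32m1_t vs1,
                         vfloat32m1_t vs2, size_t vl) {
  return vfmsac_vv_f32m1(vd, vs1, vs2, vl);
}

The same pattern applies to every vfmsub, vfnmacc, and vfnmadd variant below: the intrinsic signature gains one trailing i64 policy operand after vl.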

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1(vfloat32m1_t acc, float op1, vfloat32m1_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2(vfloat32m2_t acc, float op1, vfloat32m2_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4(vfloat32m4_t acc, float op1, vfloat32m4_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8(vfloat32m8_t acc, float op1, vfloat32m8_t op2,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
@ -377,7 +377,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -386,7 +386,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -395,7 +395,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -404,7 +404,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -413,7 +413,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -422,7 +422,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -431,7 +431,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t v
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -440,7 +440,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -449,7 +449,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t v
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -458,7 +458,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -467,7 +467,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t v
// CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -476,7 +476,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8
// CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {


@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
@ -378,7 +378,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -387,7 +387,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -396,7 +396,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -405,7 +405,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -414,7 +414,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -423,7 +423,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -432,7 +432,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -441,7 +441,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -450,7 +450,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -459,7 +459,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -468,7 +468,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -477,7 +477,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
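
For reference, the updated check lines above all append a fifth operand, i64 0, to the nomask multiply-add calls. Below is a minimal hand-written sketch of the new form, with the declaration inferred from the check lines; the function name nmacc_tu and its body are hypothetical and not part of this patch:

; Sketch: the nomask vfnmacc intrinsic now takes (vd, vs1, vs2, vl, policy).
; Policy 0 is tail undisturbed, so tail elements are taken from %vd; every
; updated test passes 0, since vd doubles as the merge value and must not
; be undef.
declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64(
    <vscale x 4 x half>, <vscale x 4 x half>, <vscale x 4 x half>, i64, i64)

define <vscale x 4 x half> @nmacc_tu(<vscale x 4 x half> %vd,
                                     <vscale x 4 x half> %vs1,
                                     <vscale x 4 x half> %vs2, i64 %vl) {
  %0 = call <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16.i64(
      <vscale x 4 x half> %vd, <vscale x 4 x half> %vs1,
      <vscale x 4 x half> %vs2, i64 %vl, i64 0)
  ret <vscale x 4 x half> %0
}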

View File

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
@ -378,7 +378,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -387,7 +387,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -396,7 +396,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -405,7 +405,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -414,7 +414,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -423,7 +423,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -432,7 +432,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -441,7 +441,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -450,7 +450,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -459,7 +459,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -468,7 +468,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -477,7 +477,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {
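
For contrast with the tests above, which always pass 0 (tail undisturbed), a caller could in principle pass 1 to request tail agnostic instead. This sketch reuses the vfnmadd declaration inferred from the check lines; the name nmadd_ta is made up for illustration and does not appear in the patch:

; Sketch: same intrinsic, but policy 1 (tail agnostic), so tail elements
; are left unspecified instead of being preserved from %acc.
declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(
    <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64, i64)

define <vscale x 1 x float> @nmadd_ta(<vscale x 1 x float> %acc,
                                      <vscale x 1 x float> %op1,
                                      <vscale x 1 x float> %op2, i64 %vl) {
  %0 = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32.i64(
      <vscale x 1 x float> %acc, <vscale x 1 x float> %op1,
      <vscale x 1 x float> %op2, i64 %vl, i64 1)
  ret <vscale x 1 x float> %0
}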

View File

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
@ -378,7 +378,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -387,7 +387,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -396,7 +396,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -405,7 +405,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -414,7 +414,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -423,7 +423,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -432,7 +432,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -441,7 +441,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -450,7 +450,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -459,7 +459,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -468,7 +468,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -477,7 +477,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

View File

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2(vfloat32mf2_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
@ -28,7 +28,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2(vfloat32mf2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1(vfloat32m1_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
@ -48,7 +48,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1(vfloat32m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2(vfloat32m2_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
@ -68,7 +68,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2(vfloat32m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4(vfloat32m4_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
@ -88,7 +88,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4(vfloat32m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
@ -98,7 +98,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8(vfloat32m8_t acc, vfloat32m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
@ -108,7 +108,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8(vfloat32m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
@ -118,7 +118,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1(vfloat64m1_t acc, vfloat64m1_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
@ -128,7 +128,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1(vfloat64m1_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
@ -138,7 +138,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2(vfloat64m2_t acc, vfloat64m2_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
@ -148,7 +148,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2(vfloat64m2_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
@ -158,7 +158,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4(vfloat64m4_t acc, vfloat64m4_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
@ -168,7 +168,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4(vfloat64m4_t acc, double op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
@ -178,7 +178,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8(vfloat64m8_t acc, vfloat64m8_t op1,
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
@ -378,7 +378,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -387,7 +387,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4 (vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vfnmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@ -396,7 +396,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4 (vfloat16mf4_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -405,7 +405,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2 (vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x half> [[TMP0]]
//
vfloat16mf2_t test_vfnmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@ -414,7 +414,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2 (vfloat16mf2_t vd, _Float16 rs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -423,7 +423,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1 (vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x half> [[TMP0]]
//
vfloat16m1_t test_vfnmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@ -432,7 +432,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1 (vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -441,7 +441,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2 (vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
vfloat16m2_t test_vfnmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@ -450,7 +450,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2 (vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -459,7 +459,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4 (vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x half> [[TMP0]]
//
vfloat16m4_t test_vfnmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@ -468,7 +468,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4 (vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t
// CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@ -477,7 +477,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8 (vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x half> [[TMP0]]
//
vfloat16m8_t test_vfnmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

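A minimal source-level sketch of the nomask form exercised above, assuming the unprefixed intrinsic naming these tests use: the C call itself is untouched by this patch; only the emitted IR gains the trailing i64 0 policy operand (tail undisturbed under the new policy scheme), since the accumulator acts as the merge value.

#include <riscv_vector.h>

// vfnmsub: acc[i] = -(acc[i] * op1[i]) + op2[i], elementwise.
// Nomask, so the generated IR call now ends in ", i64 0".
vfloat64m1_t nmsub_example(vfloat64m1_t acc, vfloat64m1_t op1,
                           vfloat64m1_t op2, size_t vl) {
  return vfnmsub_vv_f64m1(acc, op1, op2, vl);
}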

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
@ -28,7 +28,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
@ -48,7 +48,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
@ -68,7 +68,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
@ -172,7 +172,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -181,7 +181,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@ -190,7 +190,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -199,7 +199,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@ -208,7 +208,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -217,7 +217,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@ -226,7 +226,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -235,7 +235,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@ -244,7 +244,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -253,7 +253,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

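The widening accumulate form follows the same pattern; a sketch under the same naming assumption, where the accumulator element type is twice the source SEW:

#include <riscv_vector.h>

// vfwmacc: acc[i] (f64) += op1[i] * op2[i] (f32), a widening FMA.
// The nomask IR call likewise gains the trailing i64 0 policy operand.
vfloat64m1_t wmacc_example(vfloat64m1_t acc, vfloat32mf2_t op1,
                           vfloat32mf2_t op2, size_t vl) {
  return vfwmacc_vv_f64m1(acc, op1, op2, vl);
}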

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
@ -28,7 +28,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
@ -48,7 +48,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
@ -68,7 +68,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
@ -172,7 +172,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -181,7 +181,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloa
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@ -190,7 +190,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -199,7 +199,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@ -208,7 +208,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -217,7 +217,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@ -226,7 +226,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -235,7 +235,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@ -244,7 +244,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -253,7 +253,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

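The vector-scalar (vf) variants are updated identically; a sketch with a splatted scalar multiplicand, same assumptions as above:

#include <riscv_vector.h>

// vfwmsac: acc[i] = (op1 * op2[i]) - acc[i], widening f32 -> f64,
// with the scalar op1 splatted across the vector. Nomask, i64 0 policy.
vfloat64m1_t wmsac_example(vfloat64m1_t acc, float op1,
                           vfloat32mf2_t op2, size_t vl) {
  return vfwmsac_vf_f64m1(acc, op1, op2, vl);
}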

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
@ -28,7 +28,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
@ -48,7 +48,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
@ -68,7 +68,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
@ -172,7 +172,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -181,7 +181,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vflo
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@ -190,7 +190,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -199,7 +199,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat1
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@ -208,7 +208,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -217,7 +217,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@ -226,7 +226,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -235,7 +235,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@ -244,7 +244,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -253,7 +253,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

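And the negated widening form, for completeness (same assumptions; the f16 variants above additionally require half-precision vector support):

#include <riscv_vector.h>

// vfwnmacc: acc[i] = -(op1[i] * op2[i]) - acc[i], widening negated FMA.
// Nomask, so the IR call carries the i64 0 (tail undisturbed) policy.
vfloat64m1_t wnmacc_example(vfloat64m1_t acc, vfloat32mf2_t op1,
                            vfloat32mf2_t op2, size_t vl) {
  return vfwnmacc_vv_f64m1(acc, op1, op2, vl);
}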

@ -8,7 +8,7 @@
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
@ -18,7 +18,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1(vfloat64m1_t acc, vfloat32mf2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
@ -28,7 +28,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1(vfloat64m1_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
@ -38,7 +38,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2(vfloat64m2_t acc, vfloat32m1_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
@ -48,7 +48,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2(vfloat64m2_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
@ -58,7 +58,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4(vfloat64m4_t acc, vfloat32m2_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
@ -68,7 +68,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4(vfloat64m4_t acc, float op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
@ -78,7 +78,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8(vfloat64m8_t acc, vfloat32m4_t op1,
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
@ -172,7 +172,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@ -181,7 +181,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2 (vfloat32mf2_t vd, vfloat16mf4_t vs1, vflo
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vfwnmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@ -190,7 +190,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2 (vfloat32mf2_t vd, _Float16 vs1, vfloat16m
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@ -199,7 +199,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1 (vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat1
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vfwnmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@ -208,7 +208,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1 (vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@ -217,7 +217,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2 (vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vfwnmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@ -226,7 +226,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2 (vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@ -235,7 +235,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4 (vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vfwnmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@ -244,7 +244,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4 (vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t
// CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@ -253,7 +253,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8 (vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16
// CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vfwnmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

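The vfwnmsac checks above all follow one shape: the builtin keeps its C signature, and only the emitted call to @llvm.riscv.vfwnmsac.* gains the trailing i64 0 policy operand (0 = tail undisturbed). A minimal sketch of the C pattern under test (hypothetical helper name; assumes the non-overloaded intrinsics API these tests use):

#include <riscv_vector.h>

// vd[i] = -(vs1[i] * vs2[i]) + vd[i], widening f16 inputs into an f32
// accumulator. The nomask builtin now lowers to
//   @llvm.riscv.vfwnmsac...(vd, vs1, vs2, vl, i64 0)
// so tail elements of vd are preserved instead of being undef.
vfloat32m1_t widen_nmsac(vfloat32m1_t vd, vfloat16mf2_t vs1,
                         vfloat16mf2_t vs2, size_t vl) {
  return vfwnmsac_vv_f32m1(vd, vs1, vs2, vl);
}

The vmacc and vfwnmsac diffs below repeat the same transformation for every element type and LMUL.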
View File

@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vmacc_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vmacc_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vmacc_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vmacc_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vmacc_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vmacc_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vmacc_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vmacc_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vmacc_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vmacc_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vmacc_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vmacc_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vmacc_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
// CHECK-RV64-LABEL: @test_vmacc_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vmacc_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vmacc_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vmacc_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vmacc_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vmacc_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vmacc_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vmacc_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vmacc_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vmacc_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vmacc_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vmacc_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vmacc_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vmacc_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vmacc_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vmacc_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vmacc_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vmacc_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vmacc_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vmacc_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vmacc_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vmacc_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vmacc_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vmacc_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vmacc_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vmacc_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vmacc_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vmacc_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vmacc_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vmacc_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vmacc_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vmacc_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vmacc_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vmacc_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vmacc_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vmacc_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vmacc_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vmacc_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vmacc_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-LABEL: @test_vmacc_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vmacc_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
// CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vmacc_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vmacc_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vmacc_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vmacc_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vmacc_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vmacc_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vmacc_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vmacc_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vmacc_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vmacc_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vmacc_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vmacc_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vmacc_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vmacc_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vmacc_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vmacc_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vmacc_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vmacc_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vmacc_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vmacc_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vmacc_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vmacc_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vmacc_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vmacc_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-LABEL: @test_vmacc_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
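
Across all of these regenerated checks the change is identical: each nomask multiply-add call gains a trailing i64 policy operand, emitted as 0 (tail undisturbed) because the accumulator operand already serves as the merge value. As a minimal standalone sketch of the updated IR signature (the @sample wrapper below is illustrative, not part of this patch):

; Nomask vmacc now carries a policy operand: 1 = tail agnostic, 0 = tail undisturbed.
declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(
  <vscale x 4 x i16>, ; acc, also the merge value
  <vscale x 4 x i16>, ; op1
  <vscale x 4 x i16>, ; op2
  i64,                ; vl
  i64)                ; policy

define <vscale x 4 x i16> @sample(<vscale x 4 x i16> %acc, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %op2, i64 %vl) {
entry:
  %0 = call <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> %acc, <vscale x 4 x i16> %op1, <vscale x 4 x i16> %op2, i64 %vl, i64 0)
  ret <vscale x 4 x i16> %0
}
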
@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vmadd_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vmadd_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vmadd_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vmadd_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vmadd_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vmadd_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vmadd_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vmadd_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vmadd_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vmadd_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vmadd_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vmadd_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vmadd_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t
// CHECK-RV64-LABEL: @test_vmadd_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vmadd_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vmadd_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t o
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vmadd_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vmadd_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vmadd_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vmadd_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vmadd_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vmadd_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vmadd_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vmadd_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vmadd_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vmadd_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vmadd_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vmadd_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vmadd_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vmadd_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vmadd_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vmadd_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vmadd_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vmadd_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vmadd_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vmadd_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vmadd_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vmadd_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vmadd_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vmadd_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vmadd_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vmadd_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vmadd_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vmadd_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vmadd_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, s
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vmadd_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vmadd_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vmadd_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vmadd_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vmadd_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vmadd_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vmadd_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, si
// CHECK-RV64-LABEL: @test_vmadd_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vmadd_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t o
// CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2
// CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vmadd_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vmadd_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vmadd_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vmadd_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vmadd_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vmadd_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vmadd_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vmadd_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2
// CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t o
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vmadd_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vmadd_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vmadd_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vmadd_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vmadd_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vmadd_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vmadd_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vmadd_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vmadd_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vmadd_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vmadd_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vmadd_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vmadd_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vmadd_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vmadd_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vmadd_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op
// CHECK-RV64-LABEL: @test_vmadd_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
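For context, a minimal sketch of the user code these vmadd tests exercise (a hypothetical caller, assuming the non-overloaded riscv_vector.h intrinsic names used throughout this file). The C-level signature is untouched by this patch; only the lowered IR gains the trailing i64 policy operand, and the mangled intrinsic names stay the same:

#include <riscv_vector.h>

// Hypothetical example, not part of the test file. vmadd computes
// acc = (acc * scale) + addend per element; clang now emits
// @llvm.riscv.vmadd.nxv8i64.i64.i64 with the extra trailing operand,
// exactly as the CHECK-RV64 lines above verify.
vuint64m8_t scale_and_add(vuint64m8_t acc, uint64_t scale,
                          vuint64m8_t addend, size_t vl) {
  return vmadd_vx_u64m8(acc, scale, addend, vl);
}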

View File

@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vnmsac_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vnmsac_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vnmsac_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vnmsac_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vnmsac_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vnmsac_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vnmsac_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vnmsac_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vnmsac_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vnmsac_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vnmsac_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vnmsac_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vnmsac_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vnmsac_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vnmsac_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vnmsac_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vnmsac_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vnmsac_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vnmsac_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vnmsac_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vnmsac_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vnmsac_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vnmsac_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vnmsac_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vnmsac_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vnmsac_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vnmsac_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vnmsac_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vnmsac_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vnmsac_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vnmsac_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
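Likewise for vnmsac, which computes acc - (op1 * op2) per element; a hedged sketch of a caller (the multiply_subtract name is illustrative, not taken from the test suite):

#include <riscv_vector.h>

// Illustrative only. The four-argument C builtin is unchanged; the
// lowered @llvm.riscv.vnmsac.nxv8i64.i64.i64 call now carries the
// additional i64 policy operand checked in the lines above.
vint64m8_t multiply_subtract(vint64m8_t acc, int64_t op1,
                             vint64m8_t op2, size_t vl) {
  return vnmsac_vx_i64m8(acc, op1, op2, vl);
}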
@ -402,7 +402,7 @@ vint64m8_t test_vnmsac_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vnmsac_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vnmsac_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vnmsac_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vnmsac_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vnmsac_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vnmsac_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vnmsac_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vnmsac_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vnmsac_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vnmsac_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vnmsac_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vnmsac_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vnmsac_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vnmsac_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vnmsac_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vnmsac_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vnmsac_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vnmsac_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vnmsac_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vnmsac_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vnmsac_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vnmsac_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vnmsac_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vnmsac_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vnmsac_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vnmsac_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vnmsac_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vnmsac_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vnmsac_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vnmsac_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vnmsac_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {


@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@ -15,7 +15,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8(vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@ -24,7 +24,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8(vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@ -33,7 +33,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4(vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@ -42,7 +42,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4(vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@ -51,7 +51,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2(vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@ -60,7 +60,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2(vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@ -69,7 +69,7 @@ vint8m1_t test_vnmsub_vv_i8m1(vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@ -78,7 +78,7 @@ vint8m1_t test_vnmsub_vx_i8m1(vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@ -87,7 +87,7 @@ vint8m2_t test_vnmsub_vv_i8m2(vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@ -96,7 +96,7 @@ vint8m2_t test_vnmsub_vx_i8m2(vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@ -105,7 +105,7 @@ vint8m4_t test_vnmsub_vv_i8m4(vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@ -114,7 +114,7 @@ vint8m4_t test_vnmsub_vx_i8m4(vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@ -123,7 +123,7 @@ vint8m8_t test_vnmsub_vv_i8m8(vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_
// CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@ -132,7 +132,7 @@ vint8m8_t test_vnmsub_vx_i8m8(vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t v
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@ -141,7 +141,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4(vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@ -150,7 +150,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4(vint16mf4_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@ -159,7 +159,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2(vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@ -168,7 +168,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2(vint16mf2_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@ -177,7 +177,7 @@ vint16m1_t test_vnmsub_vv_i16m1(vint16m1_t acc, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@ -186,7 +186,7 @@ vint16m1_t test_vnmsub_vx_i16m1(vint16m1_t acc, int16_t op1, vint16m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@ -195,7 +195,7 @@ vint16m2_t test_vnmsub_vv_i16m2(vint16m2_t acc, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@ -204,7 +204,7 @@ vint16m2_t test_vnmsub_vx_i16m2(vint16m2_t acc, int16_t op1, vint16m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@ -213,7 +213,7 @@ vint16m4_t test_vnmsub_vv_i16m4(vint16m4_t acc, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@ -222,7 +222,7 @@ vint16m4_t test_vnmsub_vx_i16m4(vint16m4_t acc, int16_t op1, vint16m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@ -231,7 +231,7 @@ vint16m8_t test_vnmsub_vv_i16m8(vint16m8_t acc, vint16m8_t op1, vint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@ -240,7 +240,7 @@ vint16m8_t test_vnmsub_vx_i16m8(vint16m8_t acc, int16_t op1, vint16m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@ -249,7 +249,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2(vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@ -258,7 +258,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2(vint32mf2_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@ -267,7 +267,7 @@ vint32m1_t test_vnmsub_vv_i32m1(vint32m1_t acc, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@ -276,7 +276,7 @@ vint32m1_t test_vnmsub_vx_i32m1(vint32m1_t acc, int32_t op1, vint32m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@ -285,7 +285,7 @@ vint32m2_t test_vnmsub_vv_i32m2(vint32m2_t acc, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@ -294,7 +294,7 @@ vint32m2_t test_vnmsub_vx_i32m2(vint32m2_t acc, int32_t op1, vint32m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@ -303,7 +303,7 @@ vint32m4_t test_vnmsub_vv_i32m4(vint32m4_t acc, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@ -312,7 +312,7 @@ vint32m4_t test_vnmsub_vx_i32m4(vint32m4_t acc, int32_t op1, vint32m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@ -321,7 +321,7 @@ vint32m8_t test_vnmsub_vv_i32m8(vint32m8_t acc, vint32m8_t op1, vint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@ -330,7 +330,7 @@ vint32m8_t test_vnmsub_vx_i32m8(vint32m8_t acc, int32_t op1, vint32m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@ -339,7 +339,7 @@ vint64m1_t test_vnmsub_vv_i64m1(vint64m1_t acc, vint64m1_t op1, vint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@ -348,7 +348,7 @@ vint64m1_t test_vnmsub_vx_i64m1(vint64m1_t acc, int64_t op1, vint64m1_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@ -357,7 +357,7 @@ vint64m2_t test_vnmsub_vv_i64m2(vint64m2_t acc, vint64m2_t op1, vint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@ -366,7 +366,7 @@ vint64m2_t test_vnmsub_vx_i64m2(vint64m2_t acc, int64_t op1, vint64m2_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@ -375,7 +375,7 @@ vint64m4_t test_vnmsub_vv_i64m4(vint64m4_t acc, vint64m4_t op1, vint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@ -384,7 +384,7 @@ vint64m4_t test_vnmsub_vx_i64m4(vint64m4_t acc, int64_t op1, vint64m4_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@ -393,7 +393,7 @@ vint64m8_t test_vnmsub_vv_i64m8(vint64m8_t acc, vint64m8_t op1, vint64m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@ -402,7 +402,7 @@ vint64m8_t test_vnmsub_vx_i64m8(vint64m8_t acc, int64_t op1, vint64m8_t op2, siz
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@ -411,7 +411,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8(vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@ -420,7 +420,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8(vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@ -429,7 +429,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4(vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@ -438,7 +438,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4(vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@ -447,7 +447,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2(vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@ -456,7 +456,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2(vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@ -465,7 +465,7 @@ vuint8m1_t test_vnmsub_vv_u8m1(vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@ -474,7 +474,7 @@ vuint8m1_t test_vnmsub_vx_u8m1(vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@ -483,7 +483,7 @@ vuint8m2_t test_vnmsub_vv_u8m2(vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@ -492,7 +492,7 @@ vuint8m2_t test_vnmsub_vx_u8m2(vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@ -501,7 +501,7 @@ vuint8m4_t test_vnmsub_vv_u8m4(vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@ -510,7 +510,7 @@ vuint8m4_t test_vnmsub_vx_u8m4(vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@ -519,7 +519,7 @@ vuint8m8_t test_vnmsub_vv_u8m8(vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, s
// CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@ -528,7 +528,7 @@ vuint8m8_t test_vnmsub_vx_u8m8(vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@ -537,7 +537,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4(vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@ -546,7 +546,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4(vuint16mf4_t acc, uint16_t op1, vuint16mf4_t
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@ -555,7 +555,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2(vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@ -564,7 +564,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2(vuint16mf2_t acc, uint16_t op1, vuint16mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@ -573,7 +573,7 @@ vuint16m1_t test_vnmsub_vv_u16m1(vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@ -582,7 +582,7 @@ vuint16m1_t test_vnmsub_vx_u16m1(vuint16m1_t acc, uint16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@ -591,7 +591,7 @@ vuint16m2_t test_vnmsub_vv_u16m2(vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@ -600,7 +600,7 @@ vuint16m2_t test_vnmsub_vx_u16m2(vuint16m2_t acc, uint16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@ -609,7 +609,7 @@ vuint16m4_t test_vnmsub_vv_u16m4(vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@ -618,7 +618,7 @@ vuint16m4_t test_vnmsub_vx_u16m4(vuint16m4_t acc, uint16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@ -627,7 +627,7 @@ vuint16m8_t test_vnmsub_vv_u16m8(vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@ -636,7 +636,7 @@ vuint16m8_t test_vnmsub_vx_u16m8(vuint16m8_t acc, uint16_t op1, vuint16m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@ -645,7 +645,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2(vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@ -654,7 +654,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2(vuint32mf2_t acc, uint32_t op1, vuint32mf2_t
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@ -663,7 +663,7 @@ vuint32m1_t test_vnmsub_vv_u32m1(vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@ -672,7 +672,7 @@ vuint32m1_t test_vnmsub_vx_u32m1(vuint32m1_t acc, uint32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@ -681,7 +681,7 @@ vuint32m2_t test_vnmsub_vv_u32m2(vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@ -690,7 +690,7 @@ vuint32m2_t test_vnmsub_vx_u32m2(vuint32m2_t acc, uint32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@ -699,7 +699,7 @@ vuint32m4_t test_vnmsub_vv_u32m4(vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@ -708,7 +708,7 @@ vuint32m4_t test_vnmsub_vx_u32m4(vuint32m4_t acc, uint32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@ -717,7 +717,7 @@ vuint32m8_t test_vnmsub_vv_u32m8(vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@ -726,7 +726,7 @@ vuint32m8_t test_vnmsub_vx_u32m8(vuint32m8_t acc, uint32_t op1, vuint32m8_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@ -735,7 +735,7 @@ vuint64m1_t test_vnmsub_vv_u64m1(vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@ -744,7 +744,7 @@ vuint64m1_t test_vnmsub_vx_u64m1(vuint64m1_t acc, uint64_t op1, vuint64m1_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@ -753,7 +753,7 @@ vuint64m2_t test_vnmsub_vv_u64m2(vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@ -762,7 +762,7 @@ vuint64m2_t test_vnmsub_vx_u64m2(vuint64m2_t acc, uint64_t op1, vuint64m2_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@ -771,7 +771,7 @@ vuint64m4_t test_vnmsub_vv_u64m4(vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@ -780,7 +780,7 @@ vuint64m4_t test_vnmsub_vx_u64m4(vuint64m4_t acc, uint64_t op1, vuint64m4_t op2,
// CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@ -789,7 +789,7 @@ vuint64m8_t test_vnmsub_vv_u64m8(vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t o
// CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {


@ -6,7 +6,7 @@
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
@ -16,7 +16,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
@ -26,7 +26,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
@ -36,7 +36,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
@ -46,7 +46,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
@ -56,7 +56,7 @@ vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
@ -66,7 +66,7 @@ vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
@ -76,7 +76,7 @@ vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
@ -86,7 +86,7 @@ vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
@ -96,7 +96,7 @@ vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
@ -106,7 +106,7 @@ vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
@ -116,7 +116,7 @@ vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
@ -126,7 +126,7 @@ vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
@ -136,7 +136,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
@ -146,7 +146,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
@ -156,7 +156,7 @@ vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
@ -166,7 +166,7 @@ vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
@ -176,7 +176,7 @@ vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
@ -186,7 +186,7 @@ vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
@ -196,7 +196,7 @@ vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
@ -206,7 +206,7 @@ vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
@ -216,7 +216,7 @@ vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
@ -226,7 +226,7 @@ vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
@ -236,7 +236,7 @@ vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
@ -246,7 +246,7 @@ vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
@ -256,7 +256,7 @@ vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
@ -266,7 +266,7 @@ vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
@ -276,7 +276,7 @@ vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
@ -286,7 +286,7 @@ vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
@ -296,7 +296,7 @@ vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
@ -306,7 +306,7 @@ vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
@ -316,7 +316,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
@ -326,7 +326,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
@ -336,7 +336,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
@ -346,7 +346,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
@ -356,7 +356,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
@ -366,7 +366,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
@ -376,7 +376,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
@ -386,7 +386,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
@ -396,7 +396,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
@ -406,7 +406,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
@ -416,7 +416,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
@ -426,7 +426,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
@ -436,7 +436,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
@ -446,7 +446,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
@ -456,7 +456,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
@ -466,7 +466,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
@ -476,7 +476,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
@ -486,7 +486,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
@ -496,7 +496,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
@ -506,7 +506,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
@ -516,7 +516,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
@ -526,7 +526,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
@ -536,7 +536,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
@ -546,7 +546,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
@ -556,7 +556,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
@ -566,7 +566,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
@ -576,7 +576,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
@ -586,7 +586,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
@ -596,7 +596,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
@ -606,7 +606,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
@ -616,7 +616,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
@ -626,7 +626,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
@ -636,7 +636,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
@ -646,7 +646,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
@ -656,7 +656,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
@ -666,7 +666,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
@ -676,7 +676,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
@ -686,7 +686,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
@ -696,7 +696,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
@ -706,7 +706,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
@ -716,7 +716,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
@ -726,7 +726,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
@ -736,7 +736,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
@ -746,7 +746,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
@ -756,7 +756,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
@ -766,7 +766,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
@ -776,7 +776,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
@ -786,7 +786,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
@ -796,7 +796,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
@ -806,7 +806,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
@ -816,7 +816,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
@ -826,7 +826,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
@ -836,7 +836,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
@ -846,7 +846,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
@ -856,7 +856,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
@ -866,7 +866,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
@ -876,7 +876,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
@ -886,7 +886,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
@ -896,7 +896,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
// CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
@ -906,7 +906,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
@ -916,7 +916,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
@ -926,7 +926,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
@ -936,7 +936,7 @@ vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
@ -946,7 +946,7 @@ vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
@ -956,7 +956,7 @@ vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
@ -966,7 +966,7 @@ vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
@ -976,7 +976,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
@ -986,7 +986,7 @@ vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
@ -996,7 +996,7 @@ vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
@ -1006,7 +1006,7 @@ vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
@ -1016,7 +1016,7 @@ vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
@ -1026,7 +1026,7 @@ vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
@ -1036,7 +1036,7 @@ vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
@ -1046,7 +1046,7 @@ vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
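
All the check-line updates above follow a single pattern: each nomask multiply-add intrinsic (vwmacc, vwmaccu, vwmaccsu, vwmaccus) gains a trailing i64 0 immediate after the VL operand, selecting the tail-undisturbed policy so the accumulator is honored as the merge value. A minimal sketch of the pattern in the style of these tests (the function name is illustrative and not part of the test file; it assumes the intrinsic spelling used by these tests):

#include <riscv_vector.h>

// CHECK-RV64: call <vscale x 1 x i16> @llvm.riscv.vwmacc.{{.*}}(<vscale x 1 x i16> %{{.*}}, <vscale x 1 x i8> %{{.*}}, <vscale x 1 x i8> %{{.*}}, i64 %{{.*}}, i64 0)
vint16mf4_t sketch_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
                                    vint8mf8_t op2, size_t vl) {
  return vwmacc_vv_i16mf4(acc, op1, op2, vl);
}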


@ -149,6 +149,12 @@ enum RISCVPredefinedMacro : RISCVPredefinedMacroT {
VectorMaxELenFp64 = 1 << 6,
};
enum Policy : uint8_t {
None,
HasPassthruOperand,
HasPolicyOperand,
};
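
These enum values are read back from the integer Value of the TableGen Policy records, so the numbering must stay in sync with Policy<0>, Policy<1>, and Policy<2> in riscv_vector.td. A guard like the following (an assumption for illustration, not part of the patch) would catch drift; note also that introducing the enumerator None is what forces the llvm::None qualification later in this file:

static_assert(None == 0 && HasPassthruOperand == 1 && HasPolicyOperand == 2,
              "Policy enum must match the Policy<int> records in riscv_vector.td");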
// TODO: refactor the RVVIntrinsic class design after supporting all intrinsic
// combinations. This represents an instantiation of an intrinsic with a
// particular type and prototype
@ -162,7 +168,7 @@ private:
bool IsMask;
bool HasVL;
bool HasPolicy;
bool HasNoMaskPassThru;
Policy NoMaskPolicy;
bool HasNoMaskedOverloaded;
bool HasAutoDef; // There is an automatic definition in the header
std::string ManualCodegen;
@ -178,7 +184,7 @@ public:
RVVIntrinsic(StringRef Name, StringRef Suffix, StringRef MangledName,
StringRef MangledSuffix, StringRef IRName, bool IsMask,
bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
bool HasNoMaskPassThru, bool HasNoMaskedOverloaded,
Policy NoMaskPolicy, bool HasNoMaskedOverloaded,
bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types,
const std::vector<int64_t> &IntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures, unsigned NF);
@ -189,13 +195,15 @@ public:
StringRef getMangledName() const { return MangledName; }
bool hasVL() const { return HasVL; }
bool hasPolicy() const { return HasPolicy; }
bool hasNoMaskPassThru() const { return HasNoMaskPassThru; }
bool hasNoMaskPassthru() const { return NoMaskPolicy == HasPassthruOperand; }
bool hasNoMaskPolicy() const { return NoMaskPolicy == HasPolicyOperand; }
bool hasNoMaskedOverloaded() const { return HasNoMaskedOverloaded; }
bool hasManualCodegen() const { return !ManualCodegen.empty(); }
bool hasAutoDef() const { return HasAutoDef; }
bool isMask() const { return IsMask; }
StringRef getIRName() const { return IRName; }
StringRef getManualCodegen() const { return ManualCodegen; }
Policy getNoMaskPolicy() const { return NoMaskPolicy; }
RISCVPredefinedMacroT getRISCVPredefinedMacros() const {
return RISCVPredefinedMacros;
}
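
Replacing the single boolean with two predicates over one Policy value guarantees a builtin uses at most one nomask scheme. A sketch of the dispatch this enables at call sites (illustrative only, not code from the patch):

// An undef passthru means tail agnostic; a policy immediate of 0 means tail
// undisturbed.
void classify(const RVVIntrinsic &Def) {
  if (Def.hasNoMaskPassthru()) {
    // The nomask IR form carries an extra passthru operand before the sources.
  } else if (Def.hasNoMaskPolicy()) {
    // The nomask IR form carries a trailing policy immediate after VL.
  }
}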
@ -307,7 +315,7 @@ VScaleVal LMULType::getScale(unsigned ElementBitwidth) const {
}
// Illegal vscale result would be less than 1
if (Log2ScaleResult < 0)
return None;
return llvm::None;
return 1 << Log2ScaleResult;
}
@ -768,20 +776,16 @@ void RVVType::applyModifier(StringRef Transformer) {
//===----------------------------------------------------------------------===//
// RVVIntrinsic implementation
//===----------------------------------------------------------------------===//
RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
StringRef NewMangledName, StringRef MangledSuffix,
StringRef IRName, bool IsMask,
bool HasMaskedOffOperand, bool HasVL, bool HasPolicy,
bool HasNoMaskPassThru, bool HasNoMaskedOverloaded,
bool HasAutoDef, StringRef ManualCodegen,
const RVVTypes &OutInTypes,
const std::vector<int64_t> &NewIntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures,
unsigned NF)
RVVIntrinsic::RVVIntrinsic(
StringRef NewName, StringRef Suffix, StringRef NewMangledName,
StringRef MangledSuffix, StringRef IRName, bool IsMask,
bool HasMaskedOffOperand, bool HasVL, bool HasPolicy, Policy NoMaskPolicy,
bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen,
const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
const std::vector<StringRef> &RequiredFeatures, unsigned NF)
: IRName(IRName), IsMask(IsMask), HasVL(HasVL), HasPolicy(HasPolicy),
HasNoMaskPassThru(HasNoMaskPassThru),
HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
ManualCodegen(ManualCodegen.str()), NF(NF) {
NoMaskPolicy(NoMaskPolicy), HasNoMaskedOverloaded(HasNoMaskedOverloaded),
HasAutoDef(HasAutoDef), ManualCodegen(ManualCodegen.str()), NF(NF) {
// Init BuiltinName, Name and MangledName
BuiltinName = NewName.str();
@ -827,7 +831,7 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
// IntrinsicTypes is the index list of the nonmasked version. It needs to be
// updated if there is a maskedoff operand (which is always the first operand).
IntrinsicTypes = NewIntrinsicTypes;
if ((IsMask && HasMaskedOffOperand) || (!IsMask && HasNoMaskPassThru)) {
if ((IsMask && HasMaskedOffOperand) || (!IsMask && hasNoMaskPassthru())) {
for (auto &I : IntrinsicTypes) {
if (I >= 0)
I += NF;
@ -864,9 +868,14 @@ void RVVIntrinsic::emitCodeGenSwitchBody(raw_ostream &OS) const {
} else {
OS << " std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end());\n";
}
} else if (hasNoMaskPassThru()) {
OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n";
OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n";
} else {
if (hasNoMaskPolicy()) {
OS << " Ops.push_back(ConstantInt::get(Ops.back()->getType(), "
"TAIL_UNDISTURBED));\n";
} else if (hasNoMaskPassthru()) {
OS << " Ops.push_back(llvm::UndefValue::get(ResultType));\n";
OS << " std::rotate(Ops.rbegin(), Ops.rbegin() + 1, Ops.rend());\n";
}
}
OS << " IntrinsicTypes = {";
@ -1114,8 +1123,8 @@ void RVVEmitter::createCodeGen(raw_ostream &OS) {
PrintFatalError("Builtin with same name has different HasPolicy");
else if (P.first->second->hasPolicy() != Def->hasPolicy())
PrintFatalError("Builtin with same name has different HasPolicy");
else if (P.first->second->hasNoMaskPassThru() != Def->hasNoMaskPassThru())
PrintFatalError("Builtin with same name has different HasNoMaskPassThru");
else if (P.first->second->getNoMaskPolicy() != Def->getNoMaskPolicy())
PrintFatalError("Builtin with same name has different getNoMaskPolicy");
else if (P.first->second->getIntrinsicTypes() != Def->getIntrinsicTypes())
PrintFatalError("Builtin with same name has different IntrinsicTypes");
}
@ -1163,7 +1172,9 @@ void RVVEmitter::createRVVIntrinsics(
bool HasMaskedOffOperand = R->getValueAsBit("HasMaskedOffOperand");
bool HasVL = R->getValueAsBit("HasVL");
bool HasPolicy = R->getValueAsBit("HasPolicy");
bool HasNoMaskPassThru = R->getValueAsBit("HasNoMaskPassThru");
Record *NoMaskPolicyRecord = R->getValueAsDef("NoMaskPolicy");
Policy NoMaskPolicy =
static_cast<Policy>(NoMaskPolicyRecord->getValueAsInt("Value"));
bool HasNoMaskedOverloaded = R->getValueAsBit("HasNoMaskedOverloaded");
std::vector<int64_t> Log2LMULList = R->getValueAsListOfInts("Log2LMUL");
StringRef ManualCodegen = R->getValueAsString("ManualCodegen");
@ -1238,7 +1249,7 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
/*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasPolicy,
HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
NoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
Types.getValue(), IntrinsicTypes, RequiredFeatures, NF));
if (HasMask) {
// Create a mask intrinsic
@ -1247,7 +1258,7 @@ void RVVEmitter::createRVVIntrinsics(
Out.push_back(std::make_unique<RVVIntrinsic>(
Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
/*IsMask=*/true, HasMaskedOffOperand, HasVL, HasPolicy,
HasNoMaskPassThru, HasNoMaskedOverloaded, HasAutoDef,
NoMaskPolicy, HasNoMaskedOverloaded, HasAutoDef,
ManualCodegenMask, MaskTypes.getValue(), IntrinsicTypes,
RequiredFeatures, NF));
}

View File

@ -686,11 +686,12 @@ let TargetPrefix = "riscv" in {
[ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 4;
}
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryAAXANoMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
llvm_anyint_ty, LLVMMatchType<2>],
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let SplatOperand = 1;
let VLOperand = 3;
}
@ -704,11 +705,12 @@ let TargetPrefix = "riscv" in {
let SplatOperand = 1;
let VLOperand = 4;
}
// Input: (vector_in, vector_in/scalar, vector_in, vl, policy)
class RISCVTernaryWideNoMask
: Intrinsic< [llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
llvm_anyint_ty],
[IntrNoMem] >, RISCVVIntrinsic {
llvm_anyint_ty, LLVMMatchType<3>],
[ImmArg<ArgIndex<4>>, IntrNoMem] >, RISCVVIntrinsic {
let SplatOperand = 1;
let VLOperand = 3;
}
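Because of ImmArg<ArgIndex<4>>, the new policy operand must be a compile-time constant. A sketch of a well-formed call to the widening form (types mirror the vwmacc tests below; i64 stands in for iXLen, and the function name is invented for the example):

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
  <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, i64, i64)

define <vscale x 1 x i16> @sketch_vwmacc(<vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs1, <vscale x 1 x i8> %vs2, i64 %vl) {
  ; The policy must be an immediate (i64 0 or i64 1); a runtime value such
  ; as i64 %p would be rejected by the IR verifier because of ImmArg.
  %r = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %vd, <vscale x 1 x i8> %vs1, <vscale x 1 x i8> %vs2,
    i64 %vl, i64 1)
  ret <vscale x 1 x i16> %r
}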

View File

@ -3196,12 +3196,12 @@ class VPatTernaryNoMaskWithPolicy<string intrinsic,
(result_type result_reg_class:$rs3),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
VLOpFrag)),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX)
result_reg_class:$rs3,
(op1_type op1_reg_class:$rs1),
op2_kind:$rs2,
GPR:$vl, sew, TAIL_UNDISTURBED)>;
GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatTernaryMask<string intrinsic,
string inst,
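With the pattern now forwarding (XLenVT timm:$policy) instead of a hard-coded TAIL_UNDISTURBED, the policy chosen at the IR level reaches instruction selection. A sketch of the expected effect (an assumption based on the new tests below, where policy 1 selects a tail-agnostic "ta" vsetvli; policy 0 should keep the old "tu" behavior):

declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
  <vscale x 1 x float>, <vscale x 1 x float>, <vscale x 1 x float>, i64, i64)

define <vscale x 1 x float> @sketch_policy(<vscale x 1 x float> %vd, <vscale x 1 x float> %vs1, <vscale x 1 x float> %vs2, i64 %vl) {
  ; i64 1 (tail agnostic) is expected to select "ta" in the vsetvli;
  ; i64 0 (tail undisturbed) is expected to select "tu".
  %r = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
    <vscale x 1 x float> %vd, <vscale x 1 x float> %vs1,
    <vscale x 1 x float> %vs2, i64 %vl, i64 1)
  ret <vscale x 1 x float> %r
}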

View File

@ -6,6 +6,7 @@ declare <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8(
<vscale x 64 x i8>,
<vscale x 64 x i8>,
<vscale x 64 x i8>,
i64,
i64);
define <vscale x 64 x i8> @callee(<vscale x 64 x i8> %arg0, <vscale x 64 x i8> %arg1, <vscale x 64 x i8> %arg2) {
@ -19,7 +20,7 @@ define <vscale x 64 x i8> @callee(<vscale x 64 x i8> %arg0, <vscale x 64 x i8> %
%ret = call <vscale x 64 x i8> @llvm.riscv.vmacc.nxv64i8.nxv64i8(
<vscale x 64 x i8> %arg0,
<vscale x 64 x i8> %arg1,
<vscale x 64 x i8> %arg2, i64 1024)
<vscale x 64 x i8> %arg2, i64 1024, i64 0)
ret <vscale x 64 x i8> %ret
}

View File

@ -0,0 +1,609 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefix=RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh \
; RUN: -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefix=RV64
declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfmacc.vv v8, v10, v9
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfmacc.vv v8, v10, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfmadd.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfmadd.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfmsac.vv v8, v10, v9
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfmsac.vv v8, v10, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfmsub.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfmsub.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfnmacc.vv v8, v10, v9
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfnmacc.vv v8, v10, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfnmadd.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfnmadd.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfnmsac.vv v8, v10, v9
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfnmsac.vv v8, v10, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV32-NEXT: vfnmsub.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; RV64-NEXT: vfnmsub.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vfwmacc.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vfwmacc.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vfwmsac.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vfwmsac.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vfwnmacc.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vfwnmacc.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV32-NEXT: vfwnmsac.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; RV64-NEXT: vfwnmsac.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x float> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
iXLen,
iXLen);
define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmacc.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmacc.vx v8, a0, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
iXLen,
iXLen);
define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vmadd.vv v8, v10, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmadd.vx v8, a0, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
iXLen,
iXLen);
define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vnmsac.vv v8, v9, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vnmsac.vx v8, a0, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
iXLen,
iXLen);
define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v10, (a0), zero
; RV32-NEXT: vnmsub.vv v8, v10, v9
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vnmsub.vx v8, a0, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i64> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
iXLen,
iXLen);
define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vwmacc.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vwmacc.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
iXLen,
iXLen);
define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vwmaccsu.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vwmaccsu.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
iXLen,
iXLen);
define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV32-NEXT: vwmaccu.vv v8, v9, v10
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; RV64-NEXT: vwmaccu.vv v8, v9, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
iXLen,
iXLen);
define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
; RV32: # %bb.0: # %entry
; RV32-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; RV32-NEXT: vwmaccus.vx v8, a0, v9
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; RV64-NEXT: vwmaccus.vx v8, a0, v9
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
iXLen %3, iXLen 1)
ret <vscale x 1 x i16> %a
}

View File

@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}

View File

@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}

View File

@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmacc_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmacc_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmacc_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmacc_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmacc_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmacc_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmacc_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmacc_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmacc_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmacc_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmacc_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmacc_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmacc_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmacc_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmacc_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmacc_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmacc_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmacc_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmacc_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmacc_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmacc_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmacc_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmacc_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmacc_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmadd_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmadd_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmadd_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmadd_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmadd_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmadd_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmadd_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmadd_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmadd_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmadd_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmadd_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmadd_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmadd_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmadd_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmadd_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmadd_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmadd_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmadd_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmadd_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmadd_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmadd_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmadd_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmadd_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmadd_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmsac_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmsac_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmsac_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmsac_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmsac_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmsac_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmsac_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmsac_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmsac_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmsac_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmsac_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmsac_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmsac_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmsac_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmsac_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmsac_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmsac_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmsac_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmsac_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmsac_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmsac_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmsac_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmsac_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmsac_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmsub_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x half> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmsub_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x half> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmsub_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x half> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmsub_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x half> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmsub_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x half> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmsub_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmsub_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmsub_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmsub_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmsub_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x double> %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmsub_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x double> %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmsub_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x double> %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -559,6 +571,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x half> @intrinsic_vfnmsub_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 1 x half> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x half> %a
}
@ -605,6 +618,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x half> @intrinsic_vfnmsub_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 2 x half> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x half> %a
}
@ -651,6 +665,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x half> @intrinsic_vfnmsub_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 4 x half> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x half> %a
}
@ -697,6 +712,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x half> @intrinsic_vfnmsub_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 8 x half> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x half> %a
}
@ -743,6 +759,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x half> @intrinsic_vfnmsub_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 16 x half> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x half> %a
}
@ -789,6 +806,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfnmsub_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 1 x float> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -835,6 +853,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfnmsub_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -848,7 +867,7 @@ entry:
<vscale x 2 x float> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -881,6 +900,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfnmsub_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -894,7 +914,7 @@ entry:
<vscale x 4 x float> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -927,6 +947,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfnmsub_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -940,7 +961,7 @@ entry:
<vscale x 8 x float> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -973,6 +994,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x double>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfnmsub_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, iXLen %3) nounwind {
@ -986,7 +1008,7 @@ entry:
<vscale x 1 x double> %0,
double %1,
<vscale x 1 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -1019,6 +1041,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x double>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfnmsub_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, iXLen %3) nounwind {
@ -1032,7 +1055,7 @@ entry:
<vscale x 2 x double> %0,
double %1,
<vscale x 2 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -1065,6 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x double>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfnmsub_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, iXLen %3) nounwind {
@ -1078,7 +1102,7 @@ entry:
<vscale x 4 x double> %0,
double %1,
<vscale x 4 x double> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.nxv1f32.f16(
<vscale x 1 x float>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x float> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.nxv2f32.f16(
<vscale x 2 x float>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x float> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.nxv4f32.f16(
<vscale x 4 x float>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x float> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -559,6 +571,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.nxv8f32.f16(
<vscale x 8 x float>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 8 x float> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -605,6 +618,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.nxv16f32.f16(
<vscale x 16 x float>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 16 x float> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -651,6 +665,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.nxv1f64.f32(
<vscale x 1 x double>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 1 x double> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -697,6 +712,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.f32(
<vscale x 2 x double>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 2 x double> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -743,6 +759,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.nxv4f64.f32(
<vscale x 4 x double>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 4 x double> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -789,6 +806,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.nxv8f64.f32(
<vscale x 8 x double>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 8 x double> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}
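
The widening multiply-add forms take the same extra operand; only the result type (2*SEW, e.g. f32 sources accumulating into an f64 destination) differs from the source operands. Another hypothetical standalone sketch, again using i64 in place of iXLen:

declare <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
  <vscale x 2 x double>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i64,
  i64)

define <vscale x 2 x double> @example_vfwmacc_policy(<vscale x 2 x double> %vd, <vscale x 2 x float> %vs1, <vscale x 2 x float> %vs2, i64 %vl) nounwind {
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfwmacc.nxv2f64.nxv2f32(
    <vscale x 2 x double> %vd,
    <vscale x 2 x float> %vs1,
    <vscale x 2 x float> %vs2,
    i64 %vl, i64 0)    ; trailing i64 0 is the new policy operand
  ret <vscale x 2 x double> %a
}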


@ -7,6 +7,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.nxv1f32.f16(
<vscale x 1 x float>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x float> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.nxv2f32.f16(
<vscale x 2 x float>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x float> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.nxv4f32.f16(
<vscale x 4 x float>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x float> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -559,6 +571,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.nxv8f32.f16(
<vscale x 8 x float>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 8 x float> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -605,6 +618,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.nxv16f32.f16(
<vscale x 16 x float>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 16 x float> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -651,6 +665,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.nxv1f64.f32(
<vscale x 1 x double>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 1 x double> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -697,6 +712,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.nxv2f64.f32(
<vscale x 2 x double>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 2 x double> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -743,6 +759,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.nxv4f64.f32(
<vscale x 4 x double>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 4 x double> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -789,6 +806,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.nxv8f64.f32(
<vscale x 8 x double>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 8 x double> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwnmacc_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwnmacc_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwnmacc_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwnmacc_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwnmacc_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwnmacc_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwnmacc_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwnmacc_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwnmacc_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.nxv1f32.f16(
<vscale x 1 x float>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwnmacc_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x float> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.nxv2f32.f16(
<vscale x 2 x float>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwnmacc_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x float> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.nxv4f32.f16(
<vscale x 4 x float>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwnmacc_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x float> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -559,6 +571,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.nxv8f32.f16(
<vscale x 8 x float>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwnmacc_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 8 x float> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -605,6 +618,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.nxv16f32.f16(
<vscale x 16 x float>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwnmacc_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 16 x float> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -651,6 +665,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.nxv1f64.f32(
<vscale x 1 x double>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwnmacc_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 1 x double> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -697,6 +712,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.nxv2f64.f32(
<vscale x 2 x double>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwnmacc_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 2 x double> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -743,6 +759,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.nxv4f64.f32(
<vscale x 4 x double>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwnmacc_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 4 x double> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -789,6 +806,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.nxv8f64.f32(
<vscale x 8 x double>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwnmacc_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 8 x double> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}


@ -7,6 +7,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.nxv1f16(
<vscale x 1 x float>,
<vscale x 1 x half>,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwnmsac_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -20,7 +21,7 @@ entry:
<vscale x 1 x float> %0,
<vscale x 1 x half> %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -53,6 +54,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.nxv2f16(
<vscale x 2 x float>,
<vscale x 2 x half>,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwnmsac_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -66,7 +68,7 @@ entry:
<vscale x 2 x float> %0,
<vscale x 2 x half> %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -99,6 +101,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.nxv4f16(
<vscale x 4 x float>,
<vscale x 4 x half>,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwnmsac_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -112,7 +115,7 @@ entry:
<vscale x 4 x float> %0,
<vscale x 4 x half> %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -145,6 +148,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.nxv8f16(
<vscale x 8 x float>,
<vscale x 8 x half>,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwnmsac_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -158,7 +162,7 @@ entry:
<vscale x 8 x float> %0,
<vscale x 8 x half> %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -191,6 +195,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.nxv16f16(
<vscale x 16 x float>,
<vscale x 16 x half>,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwnmsac_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -204,7 +209,7 @@ entry:
<vscale x 16 x float> %0,
<vscale x 16 x half> %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -237,6 +242,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.nxv1f32(
<vscale x 1 x double>,
<vscale x 1 x float>,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwnmsac_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -250,7 +256,7 @@ entry:
<vscale x 1 x double> %0,
<vscale x 1 x float> %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -283,6 +289,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.nxv2f32(
<vscale x 2 x double>,
<vscale x 2 x float>,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwnmsac_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -296,7 +303,7 @@ entry:
<vscale x 2 x double> %0,
<vscale x 2 x float> %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -329,6 +336,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.nxv4f32(
<vscale x 4 x double>,
<vscale x 4 x float>,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwnmsac_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -342,7 +350,7 @@ entry:
<vscale x 4 x double> %0,
<vscale x 4 x float> %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -375,6 +383,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.nxv8f32(
<vscale x 8 x double>,
<vscale x 8 x float>,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwnmsac_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -388,7 +397,7 @@ entry:
<vscale x 8 x double> %0,
<vscale x 8 x float> %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}
@ -421,6 +430,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.nxv1f32.f16(
<vscale x 1 x float>,
half,
<vscale x 1 x half>,
iXLen,
iXLen);
define <vscale x 1 x float> @intrinsic_vfwnmsac_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
@ -434,7 +444,7 @@ entry:
<vscale x 1 x float> %0,
half %1,
<vscale x 1 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x float> %a
}
@ -467,6 +477,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.nxv2f32.f16(
<vscale x 2 x float>,
half,
<vscale x 2 x half>,
iXLen,
iXLen);
define <vscale x 2 x float> @intrinsic_vfwnmsac_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, iXLen %3) nounwind {
@ -480,7 +491,7 @@ entry:
<vscale x 2 x float> %0,
half %1,
<vscale x 2 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x float> %a
}
@ -513,6 +524,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.nxv4f32.f16(
<vscale x 4 x float>,
half,
<vscale x 4 x half>,
iXLen,
iXLen);
define <vscale x 4 x float> @intrinsic_vfwnmsac_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, iXLen %3) nounwind {
@ -526,7 +538,7 @@ entry:
<vscale x 4 x float> %0,
half %1,
<vscale x 4 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x float> %a
}
@ -559,6 +571,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.nxv8f32.f16(
<vscale x 8 x float>,
half,
<vscale x 8 x half>,
iXLen,
iXLen);
define <vscale x 8 x float> @intrinsic_vfwnmsac_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, iXLen %3) nounwind {
@ -572,7 +585,7 @@ entry:
<vscale x 8 x float> %0,
half %1,
<vscale x 8 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x float> %a
}
@ -605,6 +618,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.nxv16f32.f16(
<vscale x 16 x float>,
half,
<vscale x 16 x half>,
iXLen,
iXLen);
define <vscale x 16 x float> @intrinsic_vfwnmsac_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, iXLen %3) nounwind {
@ -618,7 +632,7 @@ entry:
<vscale x 16 x float> %0,
half %1,
<vscale x 16 x half> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 16 x float> %a
}
@ -651,6 +665,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.nxv1f64.f32(
<vscale x 1 x double>,
float,
<vscale x 1 x float>,
iXLen,
iXLen);
define <vscale x 1 x double> @intrinsic_vfwnmsac_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, iXLen %3) nounwind {
@ -664,7 +679,7 @@ entry:
<vscale x 1 x double> %0,
float %1,
<vscale x 1 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 1 x double> %a
}
@ -697,6 +712,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.nxv2f64.f32(
<vscale x 2 x double>,
float,
<vscale x 2 x float>,
iXLen,
iXLen);
define <vscale x 2 x double> @intrinsic_vfwnmsac_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, iXLen %3) nounwind {
@ -710,7 +726,7 @@ entry:
<vscale x 2 x double> %0,
float %1,
<vscale x 2 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 2 x double> %a
}
@ -743,6 +759,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.nxv4f64.f32(
<vscale x 4 x double>,
float,
<vscale x 4 x float>,
iXLen,
iXLen);
define <vscale x 4 x double> @intrinsic_vfwnmsac_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, iXLen %3) nounwind {
@ -756,7 +773,7 @@ entry:
<vscale x 4 x double> %0,
float %1,
<vscale x 4 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 4 x double> %a
}
@ -789,6 +806,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.nxv8f64.f32(
<vscale x 8 x double>,
float,
<vscale x 8 x float>,
iXLen,
iXLen);
define <vscale x 8 x double> @intrinsic_vfwnmsac_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, iXLen %3) nounwind {
@ -802,7 +820,7 @@ entry:
<vscale x 8 x double> %0,
float %1,
<vscale x 8 x float> %2,
iXLen %3)
iXLen %3, iXLen 0)
ret <vscale x 8 x double> %a
}


@ -5,7 +5,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
i32,
i32
);
define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8:
@ -18,7 +20,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -51,7 +53,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
i32,
i32
);
define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8:
@ -64,7 +68,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -97,7 +101,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
i32,
i32
);
define <vscale x 4 x i8> @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8:
@ -110,7 +116,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -143,7 +149,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
i32,
i32
);
define <vscale x 8 x i8> @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8:
@ -156,7 +164,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -189,7 +197,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
i32,
i32
);
define <vscale x 16 x i8> @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8:
@ -202,7 +212,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -235,7 +245,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
i32,
i32
);
define <vscale x 32 x i8> @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8:
@ -248,7 +260,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -281,7 +293,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
i32,
i32
);
define <vscale x 1 x i16> @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16:
@ -294,7 +308,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -327,7 +341,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
i32,
i32
);
define <vscale x 2 x i16> @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16:
@ -340,7 +356,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -373,7 +389,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
i32,
i32
);
define <vscale x 4 x i16> @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16:
@ -386,7 +404,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -419,7 +437,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
i32,
i32
);
define <vscale x 8 x i16> @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16:
@ -432,7 +452,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -465,7 +485,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
i32,
i32
);
define <vscale x 16 x i16> @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16:
@ -478,7 +500,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -511,7 +533,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32);
i32,
i32
);
define <vscale x 1 x i32> @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32:
@ -524,7 +548,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -557,7 +581,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32);
i32,
i32
);
define <vscale x 2 x i32> @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32:
@ -570,7 +596,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -603,7 +629,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32);
i32,
i32
);
define <vscale x 4 x i32> @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32:
@ -616,7 +644,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -649,7 +677,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32);
i32,
i32
);
define <vscale x 8 x i32> @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32:
@ -662,7 +692,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -695,7 +725,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32);
i32,
i32
);
define <vscale x 1 x i64> @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64:
@ -708,7 +740,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -741,7 +773,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32);
i32,
i32
);
define <vscale x 2 x i64> @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64:
@ -754,7 +788,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -787,7 +821,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32);
i32,
i32
);
define <vscale x 4 x i64> @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64:
@ -800,7 +836,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -833,7 +869,9 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i32);
i32,
i32
);
define <vscale x 1 x i8> @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8:
@ -846,7 +884,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -879,7 +917,9 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i32);
i32,
i32
);
define <vscale x 2 x i8> @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8:
@ -892,7 +932,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -925,7 +965,9 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i32);
i32,
i32
);
define <vscale x 4 x i8> @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8:
@ -938,7 +980,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -971,7 +1013,9 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i32);
i32,
i32
);
define <vscale x 8 x i8> @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8:
@ -984,7 +1028,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -1017,7 +1061,9 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i32);
i32,
i32
);
define <vscale x 16 x i8> @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8:
@ -1030,7 +1076,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -1063,7 +1109,9 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i32);
i32,
i32
);
define <vscale x 32 x i8> @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8:
@ -1076,7 +1124,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -1109,7 +1157,9 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i32);
i32,
i32
);
define <vscale x 1 x i16> @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16:
@ -1122,7 +1172,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -1155,7 +1205,9 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i32);
i32,
i32
);
define <vscale x 2 x i16> @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16:
@ -1168,7 +1220,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -1201,7 +1253,9 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i32);
i32,
i32
);
define <vscale x 4 x i16> @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16:
@ -1214,7 +1268,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -1247,7 +1301,9 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i32);
i32,
i32
);
define <vscale x 8 x i16> @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16:
@ -1260,7 +1316,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -1293,7 +1349,9 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i32);
i32,
i32
);
define <vscale x 16 x i16> @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16:
@ -1306,7 +1364,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -1339,7 +1397,9 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i32);
i32,
i32
);
define <vscale x 1 x i32> @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32:
@ -1352,7 +1412,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1385,7 +1445,9 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i32);
i32,
i32
);
define <vscale x 2 x i32> @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32:
@ -1398,7 +1460,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1431,7 +1493,9 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i32);
i32,
i32
);
define <vscale x 4 x i32> @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32:
@ -1444,7 +1508,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1477,7 +1541,9 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i32);
i32,
i32
);
define <vscale x 8 x i32> @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32:
@ -1490,7 +1556,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1523,7 +1589,9 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32);
i32,
i32
);
define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64:
@ -1543,7 +1611,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1583,7 +1651,9 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32);
i32,
i32
);
define <vscale x 2 x i64> @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64:
@ -1603,7 +1673,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1643,7 +1713,9 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32);
i32,
i32
);
define <vscale x 4 x i64> @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64:
@ -1663,7 +1735,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
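
On RV32 the same change lands with i32 for both the vl and the policy operand, as the vmacc hunks above show. A minimal hypothetical sketch (names are illustrative, not from the test file):

declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  i32)

define <vscale x 4 x i32> @example_vmacc_policy(<vscale x 4 x i32> %vd, <vscale x 4 x i32> %vs1, <vscale x 4 x i32> %vs2, i32 %vl) nounwind {
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %vd,
    <vscale x 4 x i32> %vs1,
    <vscale x 4 x i32> %vs2,
    i32 %vl, i32 0)    ; policy immediate, 0 throughout these tests
  ret <vscale x 4 x i32> %a
}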


@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vmacc_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vmacc_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vmacc_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vmacc_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vmacc_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vmacc_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vmacc_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vmacc_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vmacc_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vmacc_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vmacc_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vmacc_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vmacc_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vmacc_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vmacc_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vmacc_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vmacc_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vmacc_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vmacc_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vmacc_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vmacc_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vmacc_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vmacc_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vmacc_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vmacc_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vmacc_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vmacc_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vmacc_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vmacc_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vmacc_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vmacc_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vmacc_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vmacc_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vmacc_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -1536,7 +1570,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1569,6 +1603,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vmacc_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -1582,7 +1617,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1615,6 +1650,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vmacc_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -1628,7 +1664,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}

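For reference, a standalone sketch of calling the updated vmacc intrinsic, illustrative only and not taken from this commit (the function and value names are invented): the declaration gains a trailing XLEN-sized policy operand after VL, and the nomask tests above pass 0 for it (tail undisturbed; 1 would request tail agnostic).

; Sketch: nomask vmacc with the new policy operand (assumed semantics:
; 0 = tail undisturbed, 1 = tail agnostic). The first vector operand is
; the accumulator, which also serves as the merge value; %vl is the AVL.
declare <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i64,
  i64);

define <vscale x 32 x i8> @example_vmacc_vv_tu(<vscale x 32 x i8> %vd, <vscale x 32 x i8> %vs1, <vscale x 32 x i8> %vs2, i64 %vl) nounwind {
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vmacc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %vd,
    <vscale x 32 x i8> %vs1,
    <vscale x 32 x i8> %vs2,
    i64 %vl, i64 0)
  ret <vscale x 32 x i8> %a
}
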
@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i8> @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i8> @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i8> @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i8> @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i8> @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i8> @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i8> @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i8> @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i8> @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i8> @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i8> @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i8> @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
@ -1543,7 +1577,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1583,6 +1617,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
@ -1603,7 +1638,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1643,6 +1678,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
@ -1663,7 +1699,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}

@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vmadd_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vmadd_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vmadd_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vmadd_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vmadd_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vmadd_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vmadd_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vmadd_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vmadd_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vmadd_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vmadd_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vmadd_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vmadd_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vmadd_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vmadd_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vmadd_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vmadd_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vmadd_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vmadd_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vmadd_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vmadd_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vmadd_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vmadd_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vmadd_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vmadd_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vmadd_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vmadd_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vmadd_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vmadd_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vmadd_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vmadd_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vmadd_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vmadd_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vmadd_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -1536,7 +1570,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1569,6 +1603,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vmadd_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -1582,7 +1617,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1615,6 +1650,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vmadd_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -1628,7 +1664,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}

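The vnmsac tests below follow the same pattern. A standalone sketch of the vector-scalar (.vx) form on RV32, illustrative only and not from this commit: VL and the policy operand are XLEN-sized, hence i32 here.

; Sketch: nomask vnmsac.vx with the new policy operand on RV32
; (i32 XLEN). vnmsac computes vd[i] = -(rs1 * vs2[i]) + vd[i];
; passing i32 0 keeps the tail undisturbed.
declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i8>,
  i32,
  i32);

define <vscale x 1 x i8> @example_vnmsac_vx_tu(<vscale x 1 x i8> %vd, i8 %rs1, <vscale x 1 x i8> %vs2, i32 %vl) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
    <vscale x 1 x i8> %vd,
    i8 %rs1,
    <vscale x 1 x i8> %vs2,
    i32 %vl, i32 0)
  ret <vscale x 1 x i8> %a
}
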
@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i8> @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i8> @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i8> @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i8> @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i8> @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i8> @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i8> @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i8> @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i8> @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i8> @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i8> @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i8> @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
@ -1543,7 +1577,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1583,6 +1617,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
@ -1603,7 +1638,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1643,6 +1678,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
@ -1663,7 +1699,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}

@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vnmsac_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vnmsac_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vnmsac_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vnmsac_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vnmsac_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vnmsac_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vnmsac_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vnmsac_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vnmsac_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vnmsac_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vnmsac_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vnmsac_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vnmsac_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vnmsac_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vnmsac_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vnmsac_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vnmsac_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vnmsac_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vnmsac_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vnmsac_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vnmsac_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vnmsac_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vnmsac_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vnmsac_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vnmsac_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vnmsac_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vnmsac_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vnmsac_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vnmsac_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vnmsac_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vnmsac_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vnmsac_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vnmsac_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vnmsac_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -1536,7 +1570,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1569,6 +1603,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vnmsac_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -1582,7 +1617,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1615,6 +1650,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vnmsac_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -1628,7 +1664,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}

@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i8> @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i8> @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i8> @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i8> @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i8> @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i8> @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i8> @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i8> @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i8> @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i8> @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i8> @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i8> @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
@ -1543,7 +1577,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1583,6 +1617,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
@ -1603,7 +1638,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1643,6 +1678,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
@ -1663,7 +1699,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
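vnmsub differs from vnmsac only in which source the destination provides: vd is the multiplicand (vd = -(vd * rs1) + vs2) rather than the addend, but since vd is the merge value in both cases the policy treatment is identical. A minimal sketch, with illustrative names:

%a = call <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
  <vscale x 4 x i64> %vd,  ; multiplicand and merge value
  i64 %rs1,
  <vscale x 4 x i64> %vs2, ; addend
  i32 %vl, i32 0)          ; vl, policy (0 = tail undisturbed)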

@ -5,6 +5,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vnmsub_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vnmsub_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vnmsub_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vnmsub_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vnmsub_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vnmsub_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vnmsub_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vnmsub_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vnmsub_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vnmsub_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vnmsub_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vnmsub_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vnmsub_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vnmsub_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vnmsub_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vnmsub_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i64> %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vnmsub_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i64> %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vnmsub_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i64> %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -833,6 +851,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i8> @intrinsic_vnmsub_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i8> %a
}
@ -879,6 +898,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i8> @intrinsic_vnmsub_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 2 x i8> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i8> %a
}
@ -925,6 +945,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i8> @intrinsic_vnmsub_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 4 x i8> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i8> %a
}
@ -971,6 +992,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i8> @intrinsic_vnmsub_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 8 x i8> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i8> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i8> @intrinsic_vnmsub_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 16 x i8> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i8> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i8> @intrinsic_vnmsub_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 32 x i8> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i8> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vnmsub_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 1 x i16> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vnmsub_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 2 x i16> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vnmsub_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 4 x i16> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vnmsub_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 8 x i16> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vnmsub_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 16 x i16> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vnmsub_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 1 x i32> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1385,6 +1415,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vnmsub_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1398,7 +1429,7 @@ entry:
<vscale x 2 x i32> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1431,6 +1462,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vnmsub_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1444,7 +1476,7 @@ entry:
<vscale x 4 x i32> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1477,6 +1509,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vnmsub_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1490,7 +1523,7 @@ entry:
<vscale x 8 x i32> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1523,6 +1556,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i64>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vnmsub_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
@ -1536,7 +1570,7 @@ entry:
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1569,6 +1603,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i64>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vnmsub_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
@ -1582,7 +1617,7 @@ entry:
<vscale x 2 x i64> %0,
i64 %1,
<vscale x 2 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1615,6 +1650,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i64>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vnmsub_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
@ -1628,7 +1664,7 @@ entry:
<vscale x 4 x i64> %0,
i64 %1,
<vscale x 4 x i64> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}

@ -479,7 +479,7 @@ for.body: ; preds = %for.body, %entry
%add.ptr = getelementptr inbounds float, float* %x.addr.015, i64 %1
%4 = bitcast float* %y.addr.014 to <vscale x 16 x float>*
%5 = tail call <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float> undef, <vscale x 16 x float>* %4, i64 %1)
%6 = tail call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> %5, float %a, <vscale x 16 x float> %3, i64 %1)
%6 = tail call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float> %5, float %a, <vscale x 16 x float> %3, i64 %1, i64 0)
tail call void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float> %6, <vscale x 16 x float>* %4, i64 %1)
%add.ptr1 = getelementptr inbounds float, float* %y.addr.014, i64 %1
%sub = sub i64 %n.addr.016, %1
@ -493,7 +493,7 @@ for.end: ; preds = %for.body, %entry
declare i64 @llvm.riscv.vsetvli.i64(i64, i64 immarg, i64 immarg)
declare <vscale x 16 x float> @llvm.riscv.vle.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
declare <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float>, float, <vscale x 16 x float>, i64)
declare <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(<vscale x 16 x float>, float, <vscale x 16 x float>, i64, i64)
declare void @llvm.riscv.vse.nxv16f32.i64(<vscale x 16 x float>, <vscale x 16 x float>* nocapture, i64)
; We need a vsetvli in the last block because the predecessors have different
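The hand-written vsetvli test above is patched in place rather than regenerated: the vfmacc declaration and its single call site gain the same trailing operand, with 0 keeping the tail undisturbed so the loop's accumulator behavior is unchanged. A caller that knew the tail elements were dead could pass 1 instead; a hypothetical variant of the call above (the policy value is the only change, and it is not part of this test):

%6 = tail call <vscale x 16 x float> @llvm.riscv.vfmacc.nxv16f32.f32.i64(
  <vscale x 16 x float> %5, float %a, <vscale x 16 x float> %3,
  i64 %1, i64 1) ; policy 1 = tail agnostic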

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -833,6 +851,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -879,6 +898,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -925,6 +945,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -971,6 +992,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}

View File

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmacc_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmacc_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmacc_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmacc_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmacc_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmacc_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmacc_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmacc_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmacc_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmacc_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmacc_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmacc_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmacc_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmacc_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmacc_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmacc_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmacc_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmacc_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -833,6 +851,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmacc_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -879,6 +898,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmacc_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -925,6 +945,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmacc_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -971,6 +992,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmacc_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmacc_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmacc_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmacc_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmacc_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmacc_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmacc_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmacc_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmacc_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}

View File

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -833,6 +851,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -879,6 +898,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -925,6 +945,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -971,6 +992,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}

View File

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmaccsu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmaccsu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmaccsu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmaccsu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmaccsu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmaccsu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmaccsu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmaccsu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmaccsu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmaccsu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmaccsu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmaccsu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmaccsu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmaccsu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -833,6 +851,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmaccsu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -879,6 +898,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmaccsu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -925,6 +945,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmaccsu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -971,6 +992,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmaccsu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmaccsu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmaccsu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmaccsu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmaccsu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmaccsu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmaccsu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmaccsu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmaccsu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}

View File

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -833,6 +851,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -879,6 +898,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -925,6 +945,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -971,6 +992,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}

View File

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
<vscale x 1 x i64>,
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
<vscale x 2 x i64>,
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
<vscale x 4 x i64>,
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
<vscale x 8 x i64>,
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}
@ -695,6 +710,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -708,7 +724,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -741,6 +757,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -754,7 +771,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -787,6 +804,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -800,7 +818,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -833,6 +851,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -846,7 +865,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -879,6 +898,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -892,7 +912,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -925,6 +945,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -938,7 +959,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -971,6 +992,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -984,7 +1006,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -1017,6 +1039,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -1030,7 +1053,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -1063,6 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -1076,7 +1100,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -1109,6 +1133,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -1122,7 +1147,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -1155,6 +1180,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -1168,7 +1194,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -1201,6 +1227,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -1214,7 +1241,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -1247,6 +1274,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -1260,7 +1288,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -1293,6 +1321,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -1306,7 +1335,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -1339,6 +1368,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -1352,7 +1382,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i32,
i32);
define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i32 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i32,
i32);
define <vscale x 2 x i16> @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i32 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i32,
i32);
define <vscale x 4 x i16> @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i32,
i32);
define <vscale x 8 x i16> @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i32,
i32);
define <vscale x 16 x i16> @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i32,
i32);
define <vscale x 32 x i16> @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i32 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i32,
i32);
define <vscale x 1 x i32> @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i32 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i32,
i32);
define <vscale x 2 x i32> @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i32 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i32,
i32);
define <vscale x 4 x i32> @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i32,
i32);
define <vscale x 8 x i32> @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i32 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i32,
i32);
define <vscale x 16 x i32> @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i32 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i32,
i32);
define <vscale x 1 x i64> @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i32,
i32);
define <vscale x 2 x i64> @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i32,
i32);
define <vscale x 4 x i64> @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i32,
i32);
define <vscale x 8 x i64> @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i32 %3)
i32 %3, i32 0)
ret <vscale x 8 x i64> %a
}

@ -5,6 +5,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
<vscale x 1 x i8>,
i64,
i64);
define <vscale x 1 x i16> @intrinsic_vwmaccus_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
@ -18,7 +19,7 @@ entry:
<vscale x 1 x i16> %0,
i8 %1,
<vscale x 1 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i16> %a
}
@ -51,6 +52,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
<vscale x 2 x i8>,
i64,
i64);
define <vscale x 2 x i16> @intrinsic_vwmaccus_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
@ -64,7 +66,7 @@ entry:
<vscale x 2 x i16> %0,
i8 %1,
<vscale x 2 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i16> %a
}
@ -97,6 +99,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
<vscale x 4 x i8>,
i64,
i64);
define <vscale x 4 x i16> @intrinsic_vwmaccus_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
@ -110,7 +113,7 @@ entry:
<vscale x 4 x i16> %0,
i8 %1,
<vscale x 4 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i16> %a
}
@ -143,6 +146,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
<vscale x 8 x i8>,
i64,
i64);
define <vscale x 8 x i16> @intrinsic_vwmaccus_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
@ -156,7 +160,7 @@ entry:
<vscale x 8 x i16> %0,
i8 %1,
<vscale x 8 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i16> %a
}
@ -189,6 +193,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
<vscale x 16 x i8>,
i64,
i64);
define <vscale x 16 x i16> @intrinsic_vwmaccus_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
@ -202,7 +207,7 @@ entry:
<vscale x 16 x i16> %0,
i8 %1,
<vscale x 16 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i16> %a
}
@ -235,6 +240,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
<vscale x 32 x i8>,
i64,
i64);
define <vscale x 32 x i16> @intrinsic_vwmaccus_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
@ -248,7 +254,7 @@ entry:
<vscale x 32 x i16> %0,
i8 %1,
<vscale x 32 x i8> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 32 x i16> %a
}
@ -281,6 +287,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
<vscale x 1 x i16>,
i64,
i64);
define <vscale x 1 x i32> @intrinsic_vwmaccus_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
@ -294,7 +301,7 @@ entry:
<vscale x 1 x i32> %0,
i16 %1,
<vscale x 1 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i32> %a
}
@ -327,6 +334,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
<vscale x 2 x i16>,
i64,
i64);
define <vscale x 2 x i32> @intrinsic_vwmaccus_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
@ -340,7 +348,7 @@ entry:
<vscale x 2 x i32> %0,
i16 %1,
<vscale x 2 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i32> %a
}
@ -373,6 +381,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
<vscale x 4 x i16>,
i64,
i64);
define <vscale x 4 x i32> @intrinsic_vwmaccus_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
@ -386,7 +395,7 @@ entry:
<vscale x 4 x i32> %0,
i16 %1,
<vscale x 4 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i32> %a
}
@ -419,6 +428,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
<vscale x 8 x i16>,
i64,
i64);
define <vscale x 8 x i32> @intrinsic_vwmaccus_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
@ -432,7 +442,7 @@ entry:
<vscale x 8 x i32> %0,
i16 %1,
<vscale x 8 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i32> %a
}
@ -465,6 +475,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
<vscale x 16 x i16>,
i64,
i64);
define <vscale x 16 x i32> @intrinsic_vwmaccus_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
@ -478,7 +489,7 @@ entry:
<vscale x 16 x i32> %0,
i16 %1,
<vscale x 16 x i16> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 16 x i32> %a
}
@ -511,6 +522,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32(
<vscale x 1 x i64>,
i32,
<vscale x 1 x i32>,
i64,
i64);
define <vscale x 1 x i64> @intrinsic_vwmaccus_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
@ -524,7 +536,7 @@ entry:
<vscale x 1 x i64> %0,
i32 %1,
<vscale x 1 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 1 x i64> %a
}
@ -557,6 +569,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32(
<vscale x 2 x i64>,
i32,
<vscale x 2 x i32>,
i64,
i64);
define <vscale x 2 x i64> @intrinsic_vwmaccus_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
@ -570,7 +583,7 @@ entry:
<vscale x 2 x i64> %0,
i32 %1,
<vscale x 2 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 2 x i64> %a
}
@ -603,6 +616,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32(
<vscale x 4 x i64>,
i32,
<vscale x 4 x i32>,
i64,
i64);
define <vscale x 4 x i64> @intrinsic_vwmaccus_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
@ -616,7 +630,7 @@ entry:
<vscale x 4 x i64> %0,
i32 %1,
<vscale x 4 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 4 x i64> %a
}
@ -649,6 +663,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32(
<vscale x 8 x i64>,
i32,
<vscale x 8 x i32>,
i64,
i64);
define <vscale x 8 x i64> @intrinsic_vwmaccus_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
@ -662,7 +677,7 @@ entry:
<vscale x 8 x i64> %0,
i32 %1,
<vscale x 8 x i32> %2,
i64 %3)
i64 %3, i64 0)
ret <vscale x 8 x i64> %a
}
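
Taken together, the pattern across these test diffs is uniform: each nomask vwmaccu/vwmaccus declaration gains one trailing XLEN-sized parameter, and every call site passes an extra constant 0 for it after the vl argument. A minimal sketch of the updated RV64 call shape, based on the nxv1i64 vwmaccu.vx intrinsic shown above (the operand names %acc, %s, %v, and %vl are illustrative, not taken from the tests):

; Declaration: the final i64 is the policy operand added by this change.
declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
  <vscale x 1 x i64>,   ; accumulator (dest) operand
  i32,                  ; scalar multiplier
  <vscale x 1 x i32>,   ; vector multiplicand
  i64,                  ; vl
  i64);                 ; policy operand (new)

; Call site fragment: vl is forwarded unchanged; the policy is the constant 0,
; matching every updated nomask call in the tests above.
%a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
  <vscale x 1 x i64> %acc, i32 %s, <vscale x 1 x i32> %v,
  i64 %vl, i64 0)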