[RISCV] Define vwadd/vwaddu/vwsub/vwsubu intrinsics.

Define vwadd/vwaddu/vwsub/vwsubu intrinsics and lower them to V instructions.

Authored-by: Roger Ferrer Ibanez <rofirrim@gmail.com>
Co-Authored-by: Hsiangkai Wang <kai.wang@sifive.com>

Differential Revision: https://reviews.llvm.org/D93108
Hsiangkai Wang 2020-12-11 16:08:10 +08:00
parent b74c4dbb96
commit db48a6de77
19 changed files with 16891 additions and 36 deletions
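
For reference, each instruction gets an unmasked and a masked intrinsic. The unmasked/masked vwadd.vv pair, as declared in the tests added below (the tests target riscv32, so the vl operand is i32):

  declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    i32)

  declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
    <vscale x 1 x i16>,   ; maskedoff
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    <vscale x 1 x i1>,    ; mask
    i32)                  ; vl

Both lower to vwadd.vv under the vsetvli configuration checked in the tests.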


@@ -96,13 +96,44 @@ let TargetPrefix = "riscv" in {
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 3;
}
// For binary operations where the destination vector type is NOT the same
// as the first source vector type.
// Input: (vector_in, vector_in/scalar_in, vl)
class RISCVBinaryABXNoMask
: Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, llvm_any_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 2;
}
// For binary operations where the destination vector type is NOT the same
// as the first source vector type (with mask).
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
class RISCVBinaryABXMask
: Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, llvm_anyvector_ty, llvm_any_ty,
llvm_anyvector_ty, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
let ExtendOperand = 3;
}
multiclass RISCVBinaryAAX {
def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
}
multiclass RISCVBinaryABX {
def "int_riscv_" # NAME : RISCVBinaryABXNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVBinaryABXMask;
}
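// For example, `defm vwadd : RISCVBinaryABX;` below produces
// int_riscv_vwadd (RISCVBinaryABXNoMask) and int_riscv_vwadd_mask
// (RISCVBinaryABXMask), i.e. the llvm.riscv.vwadd / llvm.riscv.vwadd.mask
// intrinsics exercised by the new tests.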
defm vadd : RISCVBinaryAAX;
defm vsub : RISCVBinaryAAX;
defm vrsub : RISCVBinaryAAX;
defm vwaddu : RISCVBinaryABX;
defm vwadd : RISCVBinaryABX;
defm vwaddu_w : RISCVBinaryAAX;
defm vwadd_w : RISCVBinaryAAX;
defm vwsubu : RISCVBinaryABX;
defm vwsub : RISCVBinaryABX;
defm vwsubu_w : RISCVBinaryAAX;
defm vwsub_w : RISCVBinaryAAX;
} // TargetPrefix = "riscv"


@@ -36,21 +36,22 @@ def NoX0 : SDNodeXForm<undef,
//===----------------------------------------------------------------------===//
// This class describes information associated with the LMUL.
class LMULInfo<int lmul, VReg regclass, string mx> {
class LMULInfo<int lmul, VReg regclass, VReg wregclass, string mx> {
bits<3> value = lmul; // This is encoded as the vlmul field of vtype.
VReg vrclass = regclass;
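// wvrclass is the register class for the widened result, i.e. twice the
// LMUL of vrclass; M8 has no wider class, so it gets the dummy NoVReg.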
VReg wvrclass = wregclass;
string MX = mx;
}
// Associate LMUL with tablegen records of register classes.
def V_M1 : LMULInfo<0b000, VR, "M1">;
def V_M2 : LMULInfo<0b001, VRM2, "M2">;
def V_M4 : LMULInfo<0b010, VRM4, "M4">;
def V_M8 : LMULInfo<0b011, VRM8, "M8">;
def V_M1 : LMULInfo<0b000, VR, VRM2, "M1">;
def V_M2 : LMULInfo<0b001, VRM2, VRM4, "M2">;
def V_M4 : LMULInfo<0b010, VRM4, VRM8, "M4">;
def V_M8 : LMULInfo<0b011, VRM8, NoVReg, "M8">;
def V_MF8 : LMULInfo<0b101, VR, "MF8">;
def V_MF4 : LMULInfo<0b110, VR, "MF4">;
def V_MF2 : LMULInfo<0b111, VR, "MF2">;
def V_MF8 : LMULInfo<0b101, VR, VR, "MF8">;
def V_MF4 : LMULInfo<0b110, VR, VR, "MF4">;
def V_MF2 : LMULInfo<0b111, VR, VR, "MF2">;
// Used to iterate over all possible LMULs.
def MxList {
@@ -101,35 +102,61 @@ class GroupVTypeInfo<ValueType Vec, ValueType VecM1, ValueType Mas,
defset list<VTypeInfo> AllVectors = {
defset list<VTypeInfo> AllIntegerVectors = {
def : VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
def : VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
def : VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
def : VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
def : VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
def : VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
def : VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
def : VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
def : VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
def : VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
def VI8MF8: VTypeInfo<vint8mf8_t, vbool64_t, 8, VR, V_MF8>;
def VI8MF4: VTypeInfo<vint8mf4_t, vbool32_t, 8, VR, V_MF4>;
def VI8MF2: VTypeInfo<vint8mf2_t, vbool16_t, 8, VR, V_MF2>;
def VI8M1: VTypeInfo<vint8m1_t, vbool8_t, 8, VR, V_M1>;
def VI16MF4: VTypeInfo<vint16mf4_t, vbool64_t, 16, VR, V_MF4>;
def VI16MF2: VTypeInfo<vint16mf2_t, vbool32_t, 16, VR, V_MF2>;
def VI16M1: VTypeInfo<vint16m1_t, vbool16_t, 16, VR, V_M1>;
def VI32MF2: VTypeInfo<vint32mf2_t, vbool64_t, 32, VR, V_MF2>;
def VI32M1: VTypeInfo<vint32m1_t, vbool32_t, 32, VR, V_M1>;
def VI64M1: VTypeInfo<vint64m1_t, vbool64_t, 64, VR, V_M1>;
def : GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
def : GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
def : GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;
def VI8M2: GroupVTypeInfo<vint8m2_t, vint8m1_t, vbool4_t, 8, VRM2, V_M2>;
def VI8M4: GroupVTypeInfo<vint8m4_t, vint8m1_t, vbool2_t, 8, VRM4, V_M4>;
def VI8M8: GroupVTypeInfo<vint8m8_t, vint8m1_t, vbool1_t, 8, VRM8, V_M8>;
def : GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
def : GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
def : GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;
def VI16M2: GroupVTypeInfo<vint16m2_t,vint16m1_t,vbool8_t, 16,VRM2, V_M2>;
def VI16M4: GroupVTypeInfo<vint16m4_t,vint16m1_t,vbool4_t, 16,VRM4, V_M4>;
def VI16M8: GroupVTypeInfo<vint16m8_t,vint16m1_t,vbool2_t, 16,VRM8, V_M8>;
def : GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
def : GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
def : GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;
def VI32M2: GroupVTypeInfo<vint32m2_t,vint32m1_t,vbool16_t,32,VRM2, V_M2>;
def VI32M4: GroupVTypeInfo<vint32m4_t,vint32m1_t,vbool8_t, 32,VRM4, V_M4>;
def VI32M8: GroupVTypeInfo<vint32m8_t,vint32m1_t,vbool4_t, 32,VRM8, V_M8>;
def : GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
def : GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
def : GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
def VI64M2: GroupVTypeInfo<vint64m2_t,vint64m1_t,vbool32_t,64,VRM2, V_M2>;
def VI64M4: GroupVTypeInfo<vint64m4_t,vint64m1_t,vbool16_t,64,VRM4, V_M4>;
def VI64M8: GroupVTypeInfo<vint64m8_t,vint64m1_t,vbool8_t, 64,VRM8, V_M8>;
}
}
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
{
VTypeInfo Vti = vti;
VTypeInfo Wti = wti;
}
defset list<VTypeInfoToWide> AllWidenableIntVectors = {
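// Each pair maps a vector type to its widened type: both SEW and LMUL
// double, e.g. VI8MF8 (SEW=8, LMUL=1/8) widens to VI16MF4 (SEW=16,
// LMUL=1/4). M8 types have no widened counterpart, so they never appear
// as a Vti here.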
def : VTypeInfoToWide<VI8MF8, VI16MF4>;
def : VTypeInfoToWide<VI8MF4, VI16MF2>;
def : VTypeInfoToWide<VI8MF2, VI16M1>;
def : VTypeInfoToWide<VI8M1, VI16M2>;
def : VTypeInfoToWide<VI8M2, VI16M4>;
def : VTypeInfoToWide<VI8M4, VI16M8>;
def : VTypeInfoToWide<VI16MF4, VI32MF2>;
def : VTypeInfoToWide<VI16MF2, VI32M1>;
def : VTypeInfoToWide<VI16M1, VI32M2>;
def : VTypeInfoToWide<VI16M2, VI32M4>;
def : VTypeInfoToWide<VI16M4, VI32M8>;
def : VTypeInfoToWide<VI32MF2, VI64M1>;
def : VTypeInfoToWide<VI32M1, VI64M2>;
def : VTypeInfoToWide<VI32M2, VI64M4>;
def : VTypeInfoToWide<VI32M4, VI64M8>;
}
// This class holds the record of the RISCVVPseudoTable below.
// This represents the information we need in codegen for each pseudo.
// The definition should be consistent with `struct PseudoInfo` in
@@ -194,6 +221,16 @@ class GetVRegNoV0<VReg VRegClass> {
!eq(1, 1) : VRegClass);
}
// Join the strings in the list with the separator, ignoring empty elements.
class Join<list<string> strings, string separator> {
string ret = !foldl(!head(strings), !tail(strings), a, b,
!cond(
!and(!empty(a), !empty(b)) : "",
!empty(a) : b,
!empty(b) : a,
1 : a#separator#b));
}
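// For example, Join<["@earlyclobber $rd", "$rd = $merge"], ",">.ret is
// "@earlyclobber $rd,$rd = $merge"; with an empty constraint the fold
// drops the empty element, leaving just "$rd = $merge".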
class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
Pseudo<outs, ins, []>, RISCVVPseudo {
let BaseInstr = instr;
@@ -202,7 +239,8 @@ class VPseudo<Instruction instr, LMULInfo m, dag outs, dag ins> :
class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class> :
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs RetClass:$rd),
(ins Op1Class:$rs2, Op2Class:$rs1, GPR:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
@@ -210,6 +248,7 @@ class VPseudoBinaryNoMask<VReg RetClass,
let mayStore = 0;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Constraints = Constraint;
let Uses = [VL, VTYPE];
let VLIndex = 3;
let SEWIndex = 4;
@@ -219,7 +258,8 @@
class VPseudoBinaryMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class> :
DAGOperand Op2Class,
string Constraint> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge,
Op1Class:$rs2, Op2Class:$rs1,
@@ -229,7 +269,7 @@ class VPseudoBinaryMask<VReg RetClass,
let mayStore = 0;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Constraints = "$rd = $merge";
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let Uses = [VL, VTYPE];
let VLIndex = 5;
let SEWIndex = 6;
@@ -240,10 +280,13 @@
multiclass VPseudoBinary<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
LMULInfo MInfo> {
LMULInfo MInfo,
string Constraint = ""> {
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class>;
def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class,
Constraint>;
}
}
@@ -262,6 +305,37 @@ multiclass VPseudoBinaryV_VI<Operand ImmType = simm5> {
defm _VI : VPseudoBinary<m.vrclass, m.vrclass, ImmType, m>;
}
// We use @earlyclobber because the destination may only overlap a source
// register group in two cases:
// * The destination EEW is smaller than the source EEW and the overlap is
//   in the lowest-numbered part of the source register group.
// * The destination EEW is greater than the source EEW, the source EMUL is
//   at least 1, and the overlap is in the highest-numbered part of the
//   destination register group.
// Any other overlap is illegal.
multiclass VPseudoBinaryW_VV {
foreach m = MxList.m[0-5] in
defm _VV : VPseudoBinary<m.wvrclass, m.vrclass, m.vrclass, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_VX {
foreach m = MxList.m[0-5] in
defm _VX : VPseudoBinary<m.wvrclass, m.vrclass, GPR, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_WV {
foreach m = MxList.m[0-5] in
defm _WV : VPseudoBinary<m.wvrclass, m.wvrclass, m.vrclass, m,
"@earlyclobber $rd">;
}
multiclass VPseudoBinaryW_WX {
foreach m = MxList.m[0-5] in
defm _WX : VPseudoBinary<m.wvrclass, m.wvrclass, GPR, m,
"@earlyclobber $rd">;
}
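// Note MxList.m[0-5] stops before M8, whose widened class does not exist
// (wvrclass is NoVReg). For example, at M1 the defm expansions below name
// a pseudo PseudoVWADDU_VV_M1 with a VRM2 destination, VR sources, and
// "@earlyclobber $rd".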
multiclass VPseudoBinaryV_VV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VV;
defm "" : VPseudoBinaryV_VX;
@@ -278,6 +352,16 @@ multiclass VPseudoBinaryV_VX_VI<Operand ImmType = simm5> {
defm "" : VPseudoBinaryV_VI<ImmType>;
}
multiclass VPseudoBinaryW_VV_VX {
defm "" : VPseudoBinaryW_VV;
defm "" : VPseudoBinaryW_VX;
}
multiclass VPseudoBinaryW_WV_WX {
defm "" : VPseudoBinaryW_WV;
defm "" : VPseudoBinaryW_WX;
}
//===----------------------------------------------------------------------===//
// Helpers to define the different patterns.
//===----------------------------------------------------------------------===//
@@ -396,6 +480,50 @@ multiclass VPatBinaryV_VI<string intrinsic, string instruction,
vti.RegClass, imm_type>;
}
multiclass VPatBinaryW_VV<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "VV",
Wti.Vector, Vti.Vector, Vti.Vector, Vti.Mask,
Vti.SEW, Vti.LMul, Wti.RegClass,
Vti.RegClass, Vti.RegClass>;
}
}
multiclass VPatBinaryW_VX<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "VX",
Wti.Vector, Vti.Vector, XLenVT, Vti.Mask,
Vti.SEW, Vti.LMul, Wti.RegClass,
Vti.RegClass, GPR>;
}
}
multiclass VPatBinaryW_WV<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WV",
Wti.Vector, Wti.Vector, Vti.Vector, Vti.Mask,
Vti.SEW, Vti.LMul, Wti.RegClass,
Wti.RegClass, Vti.RegClass>;
}
}
multiclass VPatBinaryW_WX<string intrinsic, string instruction> {
foreach VtiToWti = AllWidenableIntVectors in {
defvar Vti = VtiToWti.Vti;
defvar Wti = VtiToWti.Wti;
defm : VPatBinary<intrinsic, instruction, "WX",
Wti.Vector, Wti.Vector, XLenVT, Vti.Mask,
Vti.SEW, Vti.LMul, Wti.RegClass,
Wti.RegClass, GPR>;
}
}
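// Putting the pieces together (mirroring the tests below): a call such as
//   %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
//          <vscale x 1 x i8> %x, <vscale x 1 x i8> %y, i32 %vl)
// selects the MF8 pseudo (PseudoVWADD_VV_MF8, per the naming scheme above)
// and emits
//   vsetvli ..., e8,mf8,ta,mu
//   vwadd.vv v..., v..., v...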
multiclass VPatBinaryV_VV_VX_VI<string intrinsic, string instruction,
list<VTypeInfo> vtilist>
{
@@ -418,6 +546,18 @@ multiclass VPatBinaryV_VX_VI<string intrinsic, string instruction,
defm "" : VPatBinaryV_VI<intrinsic, instruction, vtilist, simm5>;
}
multiclass VPatBinaryW_VV_VX<string intrinsic, string instruction>
{
defm "" : VPatBinaryW_VV<intrinsic, instruction>;
defm "" : VPatBinaryW_VX<intrinsic, instruction>;
}
multiclass VPatBinaryW_WV_WX<string intrinsic, string instruction>
{
defm "" : VPatBinaryW_WV<intrinsic, instruction>;
defm "" : VPatBinaryW_WX<intrinsic, instruction>;
}
//===----------------------------------------------------------------------===//
// Pseudo instructions and patterns.
//===----------------------------------------------------------------------===//
@@ -542,6 +682,19 @@ defm PseudoVADD : VPseudoBinaryV_VV_VX_VI;
defm PseudoVSUB : VPseudoBinaryV_VV_VX;
defm PseudoVRSUB : VPseudoBinaryV_VX_VI;
//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm PseudoVWADDU : VPseudoBinaryW_VV_VX;
defm PseudoVWSUBU : VPseudoBinaryW_VV_VX;
defm PseudoVWADD : VPseudoBinaryW_VV_VX;
defm PseudoVWSUB : VPseudoBinaryW_VV_VX;
defm PseudoVWADDU : VPseudoBinaryW_WV_WX;
defm PseudoVWSUBU : VPseudoBinaryW_WV_WX;
defm PseudoVWADD : VPseudoBinaryW_WV_WX;
defm PseudoVWSUB : VPseudoBinaryW_WV_WX;
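// Note: each widening opcode above is expanded twice. The VV/VX forms
// implement 2*SEW = SEW + SEW (vwadd.vv, vwadd.vx, ...) and the WV/WX
// forms implement 2*SEW = 2*SEW + SEW (vwadd.wv, vwadd.wx, ...), sharing
// one PseudoVW* name prefix.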
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -560,4 +713,16 @@ defm "" : VPatBinaryV_VV_VX_VI<"int_riscv_vadd", "PseudoVADD", AllIntegerVectors
defm "" : VPatBinaryV_VV_VX<"int_riscv_vsub", "PseudoVSUB", AllIntegerVectors>;
defm "" : VPatBinaryV_VX_VI<"int_riscv_vrsub", "PseudoVRSUB", AllIntegerVectors>;
//===----------------------------------------------------------------------===//
// 12.2. Vector Widening Integer Add/Subtract
//===----------------------------------------------------------------------===//
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwaddu", "PseudoVWADDU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsubu", "PseudoVWSUBU">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwadd", "PseudoVWADD">;
defm "" : VPatBinaryW_VV_VX<"int_riscv_vwsub", "PseudoVWSUB">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwaddu_w", "PseudoVWADDU">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsubu_w", "PseudoVWSUBU">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwadd_w", "PseudoVWADD">;
defm "" : VPatBinaryW_WV_WX<"int_riscv_vwsub_w", "PseudoVWSUB">;
} // Predicates = [HasStdExtV]


@@ -395,6 +395,9 @@ class VReg<list<ValueType> regTypes, dag regList, int Vlmul>
int Size = !mul(Vlmul, 64); // FIXME: assuming ELEN=64
}
// Dummy V register class.
def NoVReg : VReg<[vint8m1_t], (add V0), 0>;
def VR : VReg<[vint8mf2_t, vint8mf4_t, vint8mf8_t,
vint16mf2_t, vint16mf4_t, vint32mf2_t,
vint8m1_t, vint16m1_t, vint32m1_t, vint64m1_t,


@@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd_mask_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large.


@@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
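; The tests below switch to the vector-scalar form vwadd.wx, which sign-extends
; the SEW-wide scalar operand and adds it to the 2*SEW-wide vector operand.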
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
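; The remaining vwadd.wx tests pair 32-bit wide vector operands with 16-bit scalars.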
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwadd.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
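; Tests for the unsigned widening add intrinsics vwaddu.vv and vwaddu.vx,
; in unmasked and masked variants, across a range of SEW/LMUL combinations.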
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
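; The vwaddu.vv tests continue with 16-bit sources widening into 32-bit results.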
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
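; The tests below switch to the vector-scalar form vwaddu.vx, which zero-extends
; both the vector elements and the scalar operand before the add.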
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
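; The remaining vwaddu.vx tests use 16-bit scalars widening into 32-bit results.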
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu_mask_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
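; Tests for the unsigned widening add intrinsics whose first source operand is
; already wide: vwaddu.wv and vwaddu.wx, in unmasked and masked variants.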
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
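; The vwaddu.wv tests continue with 32-bit wide operands and 16-bit narrow sources.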
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
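; The tests below switch to the vector-scalar form vwaddu.wx, which zero-extends
; the SEW-wide scalar operand before adding it to the 2*SEW-wide vector operand.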
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8(
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8(
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8(
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8(
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8(
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8(
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16(
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16(
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16(
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16(
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16(
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwaddu.w_mask_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwaddu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
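; This file covers the single-width-source forms of vwsub: vwsub.vv subtracts
; two SEW vectors and vwsub.vx subtracts an SEW scalar, with both sources
; sign-extended so the result has EEW = 2*SEW (e8 -> e16, e16 -> e32).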
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
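; The tests below exercise the .vx form. A minimal sketch of the expected
; lanewise result (illustrative, not a generated test):
;   %r = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
;            <vscale x 1 x i8> %v, i8 %x, i32 %vl)
; yields sext(%v to i16) - sext(%x to i16) in each active lane.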
declare <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub_mask_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large

View File

@@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
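; This file covers vwsub.wv and vwsub.wx, the forms whose first source is
; already the wide (2*SEW) type; only the second operand is sign-extended
; before the subtraction.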
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
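; The remaining tests use the .wx form, which subtracts a sign-extended
; SEW-wide GPR scalar from the wide vector operand.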
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.nxv1i16.i8(
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsub.w.mask.nxv1i16.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.nxv2i16.i8(
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsub.w.mask.nxv2i16.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.nxv4i16.i8(
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsub.w.mask.nxv4i16.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.nxv8i16.i8(
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsub.w.mask.nxv8i16.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.nxv16i16.i8(
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsub.w.mask.nxv16i16.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.nxv32i16.i8(
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsub.w.mask.nxv32i16.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.nxv1i32.i16(
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsub.w.mask.nxv1i32.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.nxv2i32.i16(
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsub.w.mask.nxv2i32.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.nxv4i32.i16(
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsub.w.mask.nxv4i32.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.nxv8i32.i16(
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsub.w.mask.nxv8i32.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.nxv16i32.i16(
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsub.w_mask_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsub.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsub.w.mask.nxv16i32.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large

View File

@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
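; This file checks the unsigned widening subtract intrinsics (vwsubu). Each
; intrinsic is tested unmasked and masked across a range of element widths
; and LMULs; the expected vsetvli uses the SEW/LMUL of the narrow sources.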
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i8> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i16_nxv1i8_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i8> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i16_nxv2i8_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i8> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i16_nxv4i8_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i8> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i16_nxv8i8_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i8> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i16_nxv16i8_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i8> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv32i16_nxv32i8_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv1i32_nxv1i16_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv2i32_nxv2i16_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv4i32_nxv4i16_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv8i32_nxv8i16_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vv_nxv16i32_nxv16i16_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.vv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
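; Vector-scalar form: vwsubu.vx subtracts a scalar GPR operand from each
; element of the narrow source vector.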
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.nxv1i16.nxv1i8.i8(
<vscale x 1 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i16_nxv1i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.mask.nxv1i16.nxv1i8.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.nxv2i16.nxv2i8.i8(
<vscale x 2 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i16_nxv2i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.mask.nxv2i16.nxv2i8.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.nxv4i16.nxv4i8.i8(
<vscale x 4 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i16_nxv4i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.mask.nxv4i16.nxv4i8.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.nxv8i16.nxv8i8.i8(
<vscale x 8 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i16_nxv8i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.mask.nxv8i16.nxv8i8.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.nxv16i16.nxv16i8.i8(
<vscale x 16 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i16_nxv16i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.mask.nxv16i16.nxv16i8.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.nxv32i16.nxv32i8.i8(
<vscale x 32 x i8> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv32i16_nxv32i8_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.mask.nxv32i16.nxv32i8.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.nxv1i32.nxv1i16.i16(
<vscale x 1 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv1i32_nxv1i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.mask.nxv1i32.nxv1i16.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.nxv2i32.nxv2i16.i16(
<vscale x 2 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv2i32_nxv2i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.mask.nxv2i32.nxv2i16.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.nxv4i32.nxv4i16.i16(
<vscale x 4 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv4i32_nxv4i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.mask.nxv4i32.nxv4i16.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.nxv8i32.nxv8i16.i16(
<vscale x 8 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv8i32_nxv8i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.mask.nxv8i32.nxv8i16.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.nxv16i32.nxv16i16.i16(
<vscale x 16 x i16> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu_mask_vx_nxv16i32_nxv16i16_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.vx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.mask.nxv16i32.nxv16i16.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}

File diff suppressed because it is too large

View File

@ -0,0 +1,881 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
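; This file checks the vwsubu.w intrinsics, whose first source operand is
; already wide (2*SEW); only the second operand is widened.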
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i8>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i8> %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i16_nxv1i16_nxv1i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.nxv1i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
<vscale x 1 x i8> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i8>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i8> %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i16_nxv2i16_nxv2i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.nxv2i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
<vscale x 2 x i8> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i8>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i8> %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i16_nxv4i16_nxv4i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.nxv4i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
<vscale x 4 x i8> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i8>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i8> %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i16_nxv8i16_nxv8i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.nxv8i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
<vscale x 8 x i8> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i8>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i8> %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i16_nxv16i16_nxv16i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.nxv16i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
<vscale x 16 x i8> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i8>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i8> %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv32i16_nxv32i16_nxv32i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.nxv32i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
<vscale x 32 x i8> %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i16>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i16> %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv1i32_nxv1i32_nxv1i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.nxv1i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
<vscale x 1 x i16> %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i16>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i16> %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv2i32_nxv2i32_nxv2i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.nxv2i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
<vscale x 2 x i16> %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i16>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i16> %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv4i32_nxv4i32_nxv4i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.nxv4i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
<vscale x 4 x i16> %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i16>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i16> %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv8i32_nxv8i32_nxv8i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.nxv8i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
<vscale x 8 x i16> %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i16>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i16> %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wv_nxv16i32_nxv16i32_nxv16i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.wv {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.nxv16i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
<vscale x 16 x i16> %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
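; Vector-scalar form: vwsubu.wx takes the narrow second operand from a GPR.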
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
<vscale x 1 x i16>,
i8,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.nxv1i16.i8(
<vscale x 1 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 1 x i16> %a
}
declare <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
i8,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i16> @intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i16_nxv1i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i16> @llvm.riscv.vwsubu.w.mask.nxv1i16.i8(
<vscale x 1 x i16> %0,
<vscale x 1 x i16> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
<vscale x 2 x i16>,
i8,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.nxv2i16.i8(
<vscale x 2 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 2 x i16> %a
}
declare <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
i8,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i16> @intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i16_nxv2i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i16> @llvm.riscv.vwsubu.w.mask.nxv2i16.i8(
<vscale x 2 x i16> %0,
<vscale x 2 x i16> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
<vscale x 4 x i16>,
i8,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.nxv4i16.i8(
<vscale x 4 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 4 x i16> %a
}
declare <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
i8,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i16> @intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i16_nxv4i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i16> @llvm.riscv.vwsubu.w.mask.nxv4i16.i8(
<vscale x 4 x i16> %0,
<vscale x 4 x i16> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
<vscale x 8 x i16>,
i8,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.nxv8i16.i8(
<vscale x 8 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 8 x i16> %a
}
declare <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
i8,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i16> @intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i16_nxv8i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i16> @llvm.riscv.vwsubu.w.mask.nxv8i16.i8(
<vscale x 8 x i16> %0,
<vscale x 8 x i16> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
<vscale x 16 x i16>,
i8,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.nxv16i16.i8(
<vscale x 16 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 16 x i16> %a
}
declare <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
i8,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i16> @intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i16_nxv16i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i16> @llvm.riscv.vwsubu.w.mask.nxv16i16.i8(
<vscale x 16 x i16> %0,
<vscale x 16 x i16> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
<vscale x 32 x i16>,
i8,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.nxv32i16.i8(
<vscale x 32 x i16> %0,
i8 %1,
i32 %2)
ret <vscale x 32 x i16> %a
}
declare <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
<vscale x 32 x i16>,
<vscale x 32 x i16>,
i8,
<vscale x 32 x i1>,
i32);
define <vscale x 32 x i16> @intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv32i16_nxv32i16_i8
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 32 x i16> @llvm.riscv.vwsubu.w.mask.nxv32i16.i8(
<vscale x 32 x i16> %0,
<vscale x 32 x i16> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
ret <vscale x 32 x i16> %a
}
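; The remaining tests repeat the same wx pattern for i32 wide elements with an
; i16 scalar; the expected vsetvli SEW rises to e16 accordingly.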
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
<vscale x 1 x i32>,
i16,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.nxv1i32.i16(
<vscale x 1 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 1 x i32> %a
}
declare <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
i16,
<vscale x 1 x i1>,
i32);
define <vscale x 1 x i32> @intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv1i32_nxv1i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 1 x i32> @llvm.riscv.vwsubu.w.mask.nxv1i32.i16(
<vscale x 1 x i32> %0,
<vscale x 1 x i32> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
ret <vscale x 1 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
<vscale x 2 x i32>,
i16,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.nxv2i32.i16(
<vscale x 2 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 2 x i32> %a
}
declare <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
i16,
<vscale x 2 x i1>,
i32);
define <vscale x 2 x i32> @intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv2i32_nxv2i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 2 x i32> @llvm.riscv.vwsubu.w.mask.nxv2i32.i16(
<vscale x 2 x i32> %0,
<vscale x 2 x i32> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
ret <vscale x 2 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
<vscale x 4 x i32>,
i16,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.nxv4i32.i16(
<vscale x 4 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 4 x i32> %a
}
declare <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
i16,
<vscale x 4 x i1>,
i32);
define <vscale x 4 x i32> @intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv4i32_nxv4i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 4 x i32> @llvm.riscv.vwsubu.w.mask.nxv4i32.i16(
<vscale x 4 x i32> %0,
<vscale x 4 x i32> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
ret <vscale x 4 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
<vscale x 8 x i32>,
i16,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.nxv8i32.i16(
<vscale x 8 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 8 x i32> %a
}
declare <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
i16,
<vscale x 8 x i1>,
i32);
define <vscale x 8 x i32> @intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv8i32_nxv8i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 8 x i32> @llvm.riscv.vwsubu.w.mask.nxv8i32.i16(
<vscale x 8 x i32> %0,
<vscale x 8 x i32> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
ret <vscale x 8 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
<vscale x 16 x i32>,
i16,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.nxv16i32.i16(
<vscale x 16 x i32> %0,
i16 %1,
i32 %2)
ret <vscale x 16 x i32> %a
}
declare <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
<vscale x 16 x i32>,
<vscale x 16 x i32>,
i16,
<vscale x 16 x i1>,
i32);
define <vscale x 16 x i32> @intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vwsubu.w_mask_wx_nxv16i32_nxv16i32_i16
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu
; CHECK: vwsubu.wx {{v[0-9]+}}, {{v[0-9]+}}, {{a[0-9]+}}, v0.t
%a = call <vscale x 16 x i32> @llvm.riscv.vwsubu.w.mask.nxv16i32.i16(
<vscale x 16 x i32> %0,
<vscale x 16 x i32> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
ret <vscale x 16 x i32> %a
}
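; Coverage stops at a wide-operand LMUL of m8 (narrow m4): widening doubles the
; register-group size, so a narrow m8 operand would have no legal wide result.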