[RISCV] Define vpopc/vfirst intrinsics.

Define vpopc/vfirst intrinsics and lower them to V instructions.

We worked with @rogfer01 from BSC to produce this patch.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D93795
Zakk Chen 2020-12-23 07:42:36 -08:00
parent d6ff5cf995
commit da4a637e99
6 changed files with 1077 additions and 9 deletions
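
To illustrate the lowering (a sketch distilled from the tests added below; the
vsetvli scratch register and vector register choices are illustrative), an
unmasked call such as

  %a = call i64 @llvm.riscv.vpopc.i64.nxv8i1(<vscale x 8 x i1> %0, i64 %1)

selects a vsetvli for SEW=8/LMUL=1 followed by the V instruction:

  vsetvli a1, a0, e8,m1,ta,mu
  vpopc.m a0, v8

The masked variants take an additional mask operand and emit the v0.t form
(vpopc.m a0, v8, v0.t); vfirst.m is handled the same way.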

llvm/include/llvm/IR/IntrinsicsRISCV.td

@@ -375,6 +375,20 @@ let TargetPrefix = "riscv" in {
[LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>,
LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic;
// For unary operations with scalar type output without mask
// Output: (scalar type)
// Input: (vector_in, vl)
class RISCVMaskUnarySOutNoMask
: Intrinsic<[llvm_anyint_ty],
[llvm_anyvector_ty, LLVMMatchType<0>],
[IntrNoMem]>, RISCVVIntrinsic;
// For unary operations with scalar type output with mask
// Output: (scalar type)
// Input: (vector_in, mask, vl)
class RISCVMaskUnarySOutMask
: Intrinsic<[llvm_anyint_ty],
[llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<0>],
[IntrNoMem]>, RISCVVIntrinsic;
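// For example (a sketch; these concrete shapes match the tests added in this
// patch), on RV64 the vpopc definitions below instantiate as:
//   declare i64 @llvm.riscv.vpopc.i64.nxv1i1(<vscale x 1 x i1>, i64)
//   declare i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(<vscale x 1 x i1>,
//                                                 <vscale x 1 x i1>, i64)
// i.e. the scalar result and vl operand share the overloaded XLen type, and
// the mask operand matches the overloaded source vector type.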
multiclass RISCVUSLoad {
def "int_riscv_" # NAME : RISCVUSLoad;
@@ -451,6 +465,10 @@ let TargetPrefix = "riscv" in {
def "int_riscv_" # NAME : RISCVReductionNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVReductionMask;
}
multiclass RISCVMaskUnarySOut {
def "int_riscv_" # NAME : RISCVMaskUnarySOutNoMask;
def "int_riscv_" # NAME # "_mask" : RISCVMaskUnarySOutMask;
}
defm vle : RISCVUSLoad;
defm vleff : RISCVUSLoad;
@@ -658,4 +676,8 @@ let TargetPrefix = "riscv" in {
def int_riscv_vmnor: RISCVBinaryAAANoMask;
def int_riscv_vmornot: RISCVBinaryAAANoMask;
def int_riscv_vmxnor: RISCVBinaryAAANoMask;
defm vpopc : RISCVMaskUnarySOut;
defm vfirst : RISCVMaskUnarySOut;
} // TargetPrefix = "riscv"

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

@@ -188,23 +188,24 @@ class GetIntVTypeInfo<VTypeInfo vti>
VTypeInfo Vti = !cast<VTypeInfo>(!subst("VF", "VI", !cast<string>(vti)));
}
class MTypeInfo<ValueType Mas, LMULInfo M> {
class MTypeInfo<ValueType Mas, LMULInfo M, string Bx> {
ValueType Mask = Mas;
// {SEW, VLMul} values set a valid VType to deal with this mask type.
// we assume SEW=8 and set corresponding LMUL.
int SEW = 8;
LMULInfo LMul = M;
string BX = Bx; // Suffix for mask operation pseudo names.
}
defset list<MTypeInfo> AllMasks = {
// vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
def : MTypeInfo<vbool64_t, V_MF8>;
def : MTypeInfo<vbool32_t, V_MF4>;
def : MTypeInfo<vbool16_t, V_MF2>;
def : MTypeInfo<vbool8_t, V_M1>;
def : MTypeInfo<vbool4_t, V_M2>;
def : MTypeInfo<vbool2_t, V_M4>;
def : MTypeInfo<vbool1_t, V_M8>;
def : MTypeInfo<vbool64_t, V_MF8, "B1">;
def : MTypeInfo<vbool32_t, V_MF4, "B2">;
def : MTypeInfo<vbool16_t, V_MF2, "B4">;
def : MTypeInfo<vbool8_t, V_M1, "B8">;
def : MTypeInfo<vbool4_t, V_M2, "B16">;
def : MTypeInfo<vbool2_t, V_M4, "B32">;
def : MTypeInfo<vbool1_t, V_M8, "B64">;
}
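// For example, vbool8_t pairs SEW=8 with LMUL=1 and the "B8" suffix, so the
// mask pseudos defined below get names such as PseudoVPOPC_M_B8.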
class VTypeInfoToWide<VTypeInfo vti, VTypeInfo wti>
@@ -294,8 +295,15 @@ class PseudoToVInst<string PseudoInst> {
!subst("_MF2", "",
!subst("_MF4", "",
!subst("_MF8", "",
!subst("_B1", "",
!subst("_B2", "",
!subst("_B4", "",
!subst("_B8", "",
!subst("_B16", "",
!subst("_B32", "",
!subst("_B64", "",
!subst("_MASK", "",
!subst("Pseudo", "", PseudoInst)))))))));
!subst("Pseudo", "", PseudoInst))))))))))))))));
}
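// For example, PseudoToVInst<"PseudoVPOPC_M_B8_MASK">.VInst strips "Pseudo",
// "_B8" and "_MASK", leaving "VPOPC_M", the name of the underlying vpopc.m
// instruction record.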
// The destination vector register group for a masked vector instruction cannot
@@ -499,6 +507,36 @@ class VPseudoUnaryNoDummyMask<VReg RetClass,
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VMaskPseudoUnarySOutNoMask:
Pseudo<(outs GPR:$rd),
(ins VR:$rs1, GPR:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Uses = [VL, VTYPE];
let VLIndex = 2;
let SEWIndex = 3;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VMaskPseudoUnarySOutMask:
Pseudo<(outs GPR:$rd),
(ins VR:$rs1, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let usesCustomInserter = 1;
let Uses = [VL, VTYPE];
let VLIndex = 3;
let SEWIndex = 4;
let HasDummyMask = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoBinaryNoMask<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
@@ -687,6 +725,16 @@ multiclass VPseudoIStore {
}
}
multiclass VMaskPseudoUnarySOut {
foreach mti = AllMasks in
{
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VMaskPseudoUnarySOutNoMask;
def "_M_" # mti.BX # "_MASK" : VMaskPseudoUnarySOutMask;
}
}
}
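// For example, "defm PseudoVPOPC : VMaskPseudoUnarySOut;" below expands to one
// unmasked/masked pair per entry of AllMasks: PseudoVPOPC_M_B1 through
// PseudoVPOPC_M_B64, plus PseudoVPOPC_M_B1_MASK through PseudoVPOPC_M_B64_MASK.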
multiclass VPseudoBinary<VReg RetClass,
VReg Op1Class,
DAGOperand Op2Class,
@@ -1211,6 +1259,22 @@ multiclass VPatIStore<string intrinsic,
(PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
}
multiclass VPatMaskUnarySOut<string intrinsic_name,
string inst>
{
foreach mti = AllMasks in {
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name)
(mti.Mask VR:$rs1), GPR:$vl)),
(!cast<Instruction>(inst#"_M_"#mti.BX) $rs1,
(NoX0 GPR:$vl), mti.SEW)>;
def : Pat<(XLenVT (!cast<Intrinsic>(intrinsic_name # "_mask")
(mti.Mask VR:$rs1), (mti.Mask V0), GPR:$vl)),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK") $rs1,
(mti.Mask V0), (NoX0 GPR:$vl), mti.SEW)>;
}
}
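// For example, with mti = vbool8_t the first pattern matches
// (XLenVT (int_riscv_vpopc (nxv8i1 VR:$rs1), GPR:$vl)) and selects
// PseudoVPOPC_M_B8 with SEW = 8.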
multiclass VPatBinary<string intrinsic,
string inst,
string kind,
@@ -2103,6 +2167,18 @@ defm PseudoVMNOR: VPseudoBinaryM_MM;
defm PseudoVMORNOT: VPseudoBinaryM_MM;
defm PseudoVMXNOR: VPseudoBinaryM_MM;
//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
//===----------------------------------------------------------------------===//
defm PseudoVPOPC: VMaskPseudoUnarySOut;
//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
defm PseudoVFIRST: VMaskPseudoUnarySOut;
//===----------------------------------------------------------------------===//
// 17. Vector Permutation Instructions
//===----------------------------------------------------------------------===//
@@ -2584,6 +2660,20 @@ let Predicates = [HasStdExtV] in {
defm "" : VPatBinaryM_MM<"int_riscv_vmxnor", "PseudoVMXNOR">;
} // Predicates = [HasStdExtV]
//===----------------------------------------------------------------------===//
// 16.2. Vector mask population count vpopc
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
defm "" : VPatMaskUnarySOut<"int_riscv_vpopc", "PseudoVPOPC">;
} // Predicates = [HasStdExtV]
//===----------------------------------------------------------------------===//
// 16.3. vfirst find-first-set mask bit
//===----------------------------------------------------------------------===//
let Predicates = [HasStdExtV] in {
defm "" : VPatMaskUnarySOut<"int_riscv_vfirst", "PseudoVFIRST">;
} // Predicates = [HasStdExtV]
//===----------------------------------------------------------------------===//
// 17. Vector Permutation Instructions
//===----------------------------------------------------------------------===//

llvm/test/CodeGen/RISCV/rvv/vfirst-rv32.ll

@@ -0,0 +1,239 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare i32 @llvm.riscv.vfirst.i64.nxv1i1(
<vscale x 1 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv1i1(
<vscale x 1 x i1> %0,
i32 %1)
ret i32 %a
}
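; Note: mask operations assume SEW=8 with the LMUL implied by the vbool type
; (see MTypeInfo), so nxv1i1 selects the e8,mf8 vtype checked above.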
declare i32 @llvm.riscv.vfirst.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.i64.nxv2i1(
<vscale x 2 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv2i1(
<vscale x 2 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.i64.nxv4i1(
<vscale x 4 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv4i1(
<vscale x 4 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.i64.nxv8i1(
<vscale x 8 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv8i1(
<vscale x 8 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.i64.nxv16i1(
<vscale x 16 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv16i1(
<vscale x 16 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.i64.nxv32i1(
<vscale x 32 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv32i1(
<vscale x 32 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.i64.nxv64i1(
<vscale x 64 x i1>,
i32);
define i32 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vfirst.i64.nxv64i1(
<vscale x 64 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vfirst.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define i32 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vfirst.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
ret i32 %a
}

llvm/test/CodeGen/RISCV/rvv/vfirst-rv64.ll

@@ -0,0 +1,239 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare i64 @llvm.riscv.vfirst.i64.nxv1i1(
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv1i1(
<vscale x 1 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.i64.nxv2i1(
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv2i1(
<vscale x 2 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.i64.nxv4i1(
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv4i1(
<vscale x 4 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.i64.nxv8i1(
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv8i1(
<vscale x 8 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.i64.nxv16i1(
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv16i1(
<vscale x 16 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.i64.nxv32i1(
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv32i1(
<vscale x 32 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.i64.nxv64i1(
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vfirst_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vfirst.i64.nxv64i1(
<vscale x 64 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vfirst_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vfirst_mask_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vfirst.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vfirst.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret i64 %a
}

llvm/test/CodeGen/RISCV/rvv/vpopc-rv32.ll

@@ -0,0 +1,239 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare i32 @llvm.riscv.vpopc.i64.nxv1i1(
<vscale x 1 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv1i1(<vscale x 1 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv1i1(
<vscale x 1 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i64.nxv2i1(
<vscale x 2 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv2i1(<vscale x 2 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv2i1(
<vscale x 2 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i64.nxv4i1(
<vscale x 4 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv4i1(<vscale x 4 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv4i1(
<vscale x 4 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i64.nxv8i1(
<vscale x 8 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv8i1(<vscale x 8 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv8i1(
<vscale x 8 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i64.nxv16i1(
<vscale x 16 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv16i1(<vscale x 16 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv16i1(
<vscale x 16 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i64.nxv32i1(
<vscale x 32 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv32i1(<vscale x 32 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv32i1(
<vscale x 32 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i32 %2)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.i64.nxv64i1(
<vscale x 64 x i1>,
i32);
define i32 @intrinsic_vpopc_m_i64_nxv64i1(<vscale x 64 x i1> %0, i32 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i32 @llvm.riscv.vpopc.i64.nxv64i1(
<vscale x 64 x i1> %0,
i32 %1)
ret i32 %a
}
declare i32 @llvm.riscv.vpopc.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i32);
define i32 @intrinsic_vpopc_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i32 @llvm.riscv.vpopc.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i32 %2)
ret i32 %a
}

llvm/test/CodeGen/RISCV/rvv/vpopc-rv64.ll

@@ -0,0 +1,239 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
; RUN: --riscv-no-aliases < %s | FileCheck %s
declare i64 @llvm.riscv.vpopc.i64.nxv1i1(
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv1i1(<vscale x 1 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv1i1(
<vscale x 1 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv1i1
; CHECK: vsetvli {{.*}}, a0, e8,mf8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv1i1(
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv2i1(
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv2i1(<vscale x 2 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv2i1(
<vscale x 2 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv2i1
; CHECK: vsetvli {{.*}}, a0, e8,mf4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv2i1(
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv4i1(
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv4i1(<vscale x 4 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv4i1(
<vscale x 4 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv4i1
; CHECK: vsetvli {{.*}}, a0, e8,mf2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv4i1(
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv8i1(
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv8i1(<vscale x 8 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv8i1(
<vscale x 8 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv8i1
; CHECK: vsetvli {{.*}}, a0, e8,m1,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv16i1(
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv16i1(<vscale x 16 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv16i1(
<vscale x 16 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv16i1
; CHECK: vsetvli {{.*}}, a0, e8,m2,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv16i1(
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv32i1(
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv32i1(<vscale x 32 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv32i1(
<vscale x 32 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv32i1
; CHECK: vsetvli {{.*}}, a0, e8,m4,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv32i1(
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
i64 %2)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.i64.nxv64i1(
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vpopc_m_i64_nxv64i1(<vscale x 64 x i1> %0, i64 %1) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}
%a = call i64 @llvm.riscv.vpopc.i64.nxv64i1(
<vscale x 64 x i1> %0,
i64 %1)
ret i64 %a
}
declare i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
i64);
define i64 @intrinsic_vpopc_mask_m_i64_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
entry:
; CHECK-LABEL: intrinsic_vpopc_mask_m_i64_nxv64i1
; CHECK: vsetvli {{.*}}, a0, e8,m8,ta,mu
; CHECK: vpopc.m a0, {{v[0-9]+}}, v0.t
%a = call i64 @llvm.riscv.vpopc.mask.i64.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
i64 %2)
ret i64 %a
}