[RISCV] Add policy operand for masked compare and vmsbf/vmsif/vmsof IR intrinsics.

These operations are updated under a tail agnostic policy, but they could
also use a mask agnostic or mask undisturbed policy.

Reviewed By: rogfer01

Differential Revision: https://reviews.llvm.org/D120228
This commit is contained in:
Zakk Chen 2022-03-16 20:17:06 -07:00
parent c4d74a93f6
commit 10fd2822b7
43 changed files with 2682 additions and 2338 deletions

View File

@ -1678,7 +1678,6 @@ defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
["Uv", "UvUw"]]>;
// 12.8. Vector Integer Comparison Instructions
let HasPolicy = false in {
defm vmseq : RVVIntMaskOutBuiltinSet;
defm vmsne : RVVIntMaskOutBuiltinSet;
defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@ -1689,7 +1688,6 @@ defm vmsgtu : RVVUnsignedMaskOutBuiltinSet;
defm vmsgt : RVVSignedMaskOutBuiltinSet;
defm vmsgeu : RVVUnsignedMaskOutBuiltinSet;
defm vmsge : RVVSignedMaskOutBuiltinSet;
}
// 12.9. Vector Integer Min/Max Instructions
let NoMaskPolicy = HasPassthruOperand in {
@ -1877,14 +1875,12 @@ defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
// 14.13. Vector Floating-Point Compare Instructions
let HasPolicy = false in {
defm vmfeq : RVVFloatingMaskOutBuiltinSet;
defm vmfne : RVVFloatingMaskOutBuiltinSet;
defm vmflt : RVVFloatingMaskOutBuiltinSet;
defm vmfle : RVVFloatingMaskOutBuiltinSet;
defm vmfgt : RVVFloatingMaskOutBuiltinSet;
defm vmfge : RVVFloatingMaskOutBuiltinSet;
}
// 14.14. Vector Floating-Point Classify Instruction
let Name = "vfclass_v", NoMaskPolicy = HasPassthruOperand in
@ -1998,6 +1994,7 @@ def vcpop : RVVMaskOp0Builtin<"um">;
// 16.3. vfirst find-first-set mask bit
def vfirst : RVVMaskOp0Builtin<"lm">;
}
// 16.4. vmsbf.m set-before-first mask bit
def vmsbf : RVVMaskUnaryBuiltin;
@ -2007,7 +2004,6 @@ def vmsif : RVVMaskUnaryBuiltin;
// 16.6. vmsof.m set-only-first mask bit
def vmsof : RVVMaskUnaryBuiltin;
}
let NoMaskPolicy = HasPassthruOperand, HasNoMaskedOverloaded = false in {
// 16.8. Vector Iota Instruction

View File

@ -55,7 +55,7 @@ vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) { return vmsbf(op1, vl); }
// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@ -65,7 +65,7 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@ -75,7 +75,7 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@ -85,7 +85,7 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@ -95,7 +95,7 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@ -105,7 +105,7 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@ -115,7 +115,7 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,

View File

@ -55,7 +55,7 @@ vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) { return vmsif(op1, vl); }
// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@ -65,7 +65,7 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@ -75,7 +75,7 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@ -85,7 +85,7 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@ -95,7 +95,7 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@ -105,7 +105,7 @@ vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@ -115,7 +115,7 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,

View File

@ -55,7 +55,7 @@ vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) { return vmsof(op1, vl); }
// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@ -65,7 +65,7 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@ -75,7 +75,7 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@ -85,7 +85,7 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@ -95,7 +95,7 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@ -105,7 +105,7 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@ -115,7 +115,7 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,

View File

@ -69,7 +69,7 @@ vbool64_t test_vmsbf_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vmsbf_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@ -79,7 +79,7 @@ vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@ -89,7 +89,7 @@ vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@ -99,7 +99,7 @@ vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@ -109,7 +109,7 @@ vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@ -119,7 +119,7 @@ vbool16_t test_vmsbf_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@ -129,7 +129,7 @@ vbool32_t test_vmsbf_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
// CHECK-RV64-LABEL: @test_vmsbf_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsbf_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,

View File

@ -69,7 +69,7 @@ vbool64_t test_vmsif_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vmsif_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@ -79,7 +79,7 @@ vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@ -89,7 +89,7 @@ vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@ -99,7 +99,7 @@ vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@ -109,7 +109,7 @@ vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@ -119,7 +119,7 @@ vbool16_t test_vmsif_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@ -129,7 +129,7 @@ vbool32_t test_vmsif_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
// CHECK-RV64-LABEL: @test_vmsif_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsif_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,

View File

@ -69,7 +69,7 @@ vbool64_t test_vmsof_m_b64(vbool64_t op1, size_t vl) {
// CHECK-RV64-LABEL: @test_vmsof_m_b1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1.i64(<vscale x 64 x i1> [[MASKEDOFF:%.*]], <vscale x 64 x i1> [[OP1:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 64 x i1> [[TMP0]]
//
vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
@ -79,7 +79,7 @@ vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1.i64(<vscale x 32 x i1> [[MASKEDOFF:%.*]], <vscale x 32 x i1> [[OP1:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 32 x i1> [[TMP0]]
//
vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
@ -89,7 +89,7 @@ vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1.i64(<vscale x 16 x i1> [[MASKEDOFF:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 16 x i1> [[TMP0]]
//
vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
@ -99,7 +99,7 @@ vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1.i64(<vscale x 8 x i1> [[MASKEDOFF:%.*]], <vscale x 8 x i1> [[OP1:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i1> [[TMP0]]
//
vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
@ -109,7 +109,7 @@ vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b16_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1.i64(<vscale x 4 x i1> [[MASKEDOFF:%.*]], <vscale x 4 x i1> [[OP1:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i1> [[TMP0]]
//
vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
@ -119,7 +119,7 @@ vbool16_t test_vmsof_m_b16_m(vbool16_t mask, vbool16_t maskedoff, vbool16_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b32_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1.i64(<vscale x 2 x i1> [[MASKEDOFF:%.*]], <vscale x 2 x i1> [[OP1:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i1> [[TMP0]]
//
vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
@ -129,7 +129,7 @@ vbool32_t test_vmsof_m_b32_m(vbool32_t mask, vbool32_t maskedoff, vbool32_t op1,
// CHECK-RV64-LABEL: @test_vmsof_m_b64_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1.i64(<vscale x 1 x i1> [[MASKEDOFF:%.*]], <vscale x 1 x i1> [[OP1:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsof_m_b64_m(vbool64_t mask, vbool64_t maskedoff, vbool64_t op1,

View File

@ -572,13 +572,14 @@ let TargetPrefix = "riscv" in {
}
// For binary operations with mask type output with mask.
// Output: (mask type output)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl)
// Input: (maskedoff, vector_in, vector_in/scalar_in, mask, vl, policy)
class RISCVCompareMask
: Intrinsic<[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>],
[LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyvector_ty, llvm_any_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<2>],
[IntrNoMem, ImmArg<ArgIndex<5>>]>, RISCVVIntrinsic {
let ScalarOperand = 2;
let VLOperand = 4;
}
@ -598,7 +599,7 @@ let TargetPrefix = "riscv" in {
class RISCVClassifyMask
: Intrinsic<[LLVMVectorOfBitcastsToInt<0>],
[LLVMVectorOfBitcastsToInt<0>, llvm_anyvector_ty,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
llvm_anyint_ty, LLVMMatchType<1>],
[IntrNoMem, ImmArg<ArgIndex<4>>]>, RISCVVIntrinsic {
let VLOperand = 3;
@ -799,12 +800,12 @@ let TargetPrefix = "riscv" in {
}
// For mask unary operations with mask type in/out with mask
// Output: (mask type output)
// Input: (mask type maskedoff, mask type vector_in, mask, vl)
// Input: (mask type maskedoff, mask type vector_in, mask, vl, policy)
class RISCVMaskUnaryMOutMask
: Intrinsic<[llvm_anyint_ty],
[LLVMMatchType<0>, LLVMMatchType<0>,
LLVMMatchType<0>, llvm_anyint_ty],
[IntrNoMem]>, RISCVVIntrinsic {
LLVMMatchType<0>, llvm_anyint_ty, LLVMMatchType<1>],
[ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
let VLOperand = 3;
}
// Output: (vector)

View File

@ -1109,10 +1109,11 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
// Otherwise use
// vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
SDValue Cmp = SDValue(
CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
{MaskedOff, Src1, Src2, V0, VL, SEW, Glue}),
0);
SDValue Policy = Node->getOperand(6);
SDValue Cmp = SDValue(CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
{MaskedOff, Src1, Src2, V0,
VL, SEW, Policy, Glue}),
0);
ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
{Cmp, Mask, VL, MaskSEW}));
return;

View File

@ -966,22 +966,6 @@ class VPseudoUnaryNoMaskTU<DAGOperand RetClass, VReg OpClass, string Constraint
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUnaryMask<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
let hasSideEffects = 0;
let Constraints = Join<[Constraint, "$rd = $merge"], ",">.ret;
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
let UsesMaskPolicy = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
class VPseudoUnaryMaskTA<VReg RetClass, VReg OpClass, string Constraint = ""> :
Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
(ins GetVRegNoV0<RetClass>.R:$merge, OpClass:$rs2,
@ -1183,7 +1167,7 @@ class VPseudoBinaryMOutMask<VReg RetClass,
Pseudo<(outs RetClass:$rd),
(ins RetClass:$merge,
Op1Class:$rs2, Op2Class:$rs1,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew), []>,
VMaskOp:$vm, AVL:$vl, ixlenimm:$sew, ixlenimm:$policy), []>,
RISCVVPseudo {
let mayLoad = 0;
let mayStore = 0;
@ -1192,9 +1176,7 @@ class VPseudoBinaryMOutMask<VReg RetClass,
let HasVLOp = 1;
let HasSEWOp = 1;
let HasMergeOp = 1;
// FIXME: In current design, we would not change the mask policy, so
// UsesMaskPolicy is false. We could fix after add the policy operand.
let UsesMaskPolicy = 0;
let HasVecPolicyOp = 1;
let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
}
@ -1696,7 +1678,8 @@ multiclass VPseudoVSFS_M {
let VLMul = mti.LMul.value in {
def "_M_" # mti.BX : VPseudoUnaryNoMask<VR, VR, constraint>,
Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMask<VR, VR, constraint>,
let ForceTailAgnostic = 1 in
def "_M_" # mti.BX # "_MASK" : VPseudoUnaryMaskTA<VR, VR, constraint>,
Sched<[WriteVMSFSV, ReadVMSFSV, ReadVMask]>;
}
}
@ -1769,7 +1752,6 @@ multiclass VPseudoBinaryM<VReg RetClass,
let VLMul = MInfo.value in {
def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
Constraint>;
let ForceTailAgnostic = true in
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMOutMask<RetClass, Op1Class,
Op2Class, Constraint>;
}
@ -2482,7 +2464,6 @@ multiclass VPseudoTernaryNoMaskNoPolicy<VReg RetClass,
def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
Constraint>;
}
}
@ -2896,26 +2877,6 @@ class VPatUnaryNoMaskTU<string intrinsic_name,
(op2_type op2_reg_class:$rs2),
GPR:$vl, sew)>;
class VPatUnaryMask<string intrinsic_name,
string inst,
string kind,
ValueType result_type,
ValueType op2_type,
ValueType mask_type,
int sew,
LMULInfo vlmul,
VReg result_reg_class,
VReg op2_reg_class> :
Pat<(result_type (!cast<Intrinsic>(intrinsic_name#"_mask")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0),
VLOpFrag)),
(!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX#"_MASK")
(result_type result_reg_class:$merge),
(op2_type op2_reg_class:$rs2),
(mask_type V0), GPR:$vl, sew)>;
class VPatUnaryMaskTA<string intrinsic_name,
string inst,
string kind,
@ -2953,11 +2914,11 @@ class VPatMaskUnaryMask<string intrinsic_name,
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
(mti.Mask V0),
VLOpFrag)),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_M_"#mti.BX#"_MASK")
(mti.Mask VR:$merge),
(mti.Mask VR:$rs2),
(mti.Mask V0), GPR:$vl, mti.Log2SEW)>;
(mti.Mask V0), GPR:$vl, mti.Log2SEW, (XLenVT timm:$policy))>;
class VPatUnaryAnyMask<string intrinsic,
string inst,
@ -3113,12 +3074,12 @@ class VPatBinaryMaskSwapped<string intrinsic_name,
(op2_type op2_kind:$rs2),
(op1_type op1_reg_class:$rs1),
(mask_type V0),
VLOpFrag)),
VLOpFrag, (XLenVT timm:$policy))),
(!cast<Instruction>(inst#"_MASK")
(result_type result_reg_class:$merge),
(op1_type op1_reg_class:$rs1),
(op2_type op2_kind:$rs2),
(mask_type V0), GPR:$vl, sew)>;
(mask_type V0), GPR:$vl, sew, (XLenVT timm:$policy))>;
class VPatTiedBinaryNoMask<string intrinsic_name,
string inst,
@ -3390,9 +3351,9 @@ multiclass VPatBinaryM<string intrinsic,
{
def : VPatBinaryM<intrinsic, inst, result_type, op1_type, op2_type,
sew, op1_reg_class, op2_kind>;
def : VPatBinaryMask<intrinsic, inst, result_type, op1_type, op2_type,
mask_type, sew, result_reg_class, op1_reg_class,
op2_kind>;
def : VPatBinaryMaskTA<intrinsic, inst, result_type, op1_type, op2_type,
mask_type, sew, result_reg_class, op1_reg_class,
op2_kind>;
}
multiclass VPatBinaryTA<string intrinsic,
@ -4204,9 +4165,9 @@ multiclass VPatCompare_VI<string intrinsic, string inst,
(vti.Vector vti.RegClass:$rs1),
(vti.Scalar ImmType:$rs2),
(vti.Mask V0),
VLOpFrag)),
VLOpFrag, (XLenVT timm:$policy))),
(PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
(vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
(vti.Mask V0), GPR:$vl, vti.Log2SEW, timm:$policy)>;
}
}

View File

@ -1311,3 +1311,91 @@ entry:
ret <vscale x 1 x i8> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; RV32-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v9, (a0), zero
; RV32-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmseq.vx v0, v8, a0, v0.t
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> undef,
<vscale x 1 x i64> %0,
i64 %1,
<vscale x 1 x i1> %2,
iXLen %3, iXLen 3)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmslt.vx v8, v8, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmxor.mm v0, v8, v0
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1> undef,
<vscale x 1 x i8> %0,
i8 %1,
<vscale x 1 x i1> %2,
iXLen %3, iXLen 3)
ret <vscale x 1 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1> undef,
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
iXLen %2, iXLen 3)
ret <vscale x 64 x i1> %a
}

View File

@ -1238,3 +1238,96 @@ entry:
ret <vscale x 1 x i8> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v11, (a0), zero
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: vmv1r.v v0, v9
; RV32-NEXT: vmseq.vv v10, v8, v11, v0.t
; RV32-NEXT: vmv.v.v v0, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
<vscale x 64 x i1> %2,
iXLen %3, iXLen 1)
ret <vscale x 64 x i1> %a
}

View File

@ -1238,3 +1238,98 @@ entry:
ret <vscale x 1 x i8> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v11, (a0), zero
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV32-NEXT: vmv1r.v v0, v9
; RV32-NEXT: vmseq.vv v10, v8, v11, v0.t
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, ma
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
iXLen %4, iXLen 2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
iXLen %4, iXLen 2)
ret <vscale x 1 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
<vscale x 64 x i1> %2,
iXLen %3, iXLen 2)
ret <vscale x 64 x i1> %a
}

View File

@ -1238,3 +1238,98 @@ entry:
ret <vscale x 1 x i8> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1>,
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV32: # %bb.0: # %entry
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: sw a1, 12(sp)
; RV32-NEXT: sw a0, 8(sp)
; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vlse64.v v11, (a0), zero
; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: vsetvli zero, zero, e64, m1, tu, mu
; RV32-NEXT: vmv1r.v v0, v9
; RV32-NEXT: vmseq.vv v10, v8, v11, v0.t
; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: intrinsic_vmseq_mask_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
; RV64-NEXT: vsetvli zero, a1, e64, m1, tu, mu
; RV64-NEXT: vmv1r.v v0, v9
; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i1> %0,
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
iXLen %4, iXLen 0)
ret <vscale x 1 x i1> %a
}
declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1>,
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i1> %0,
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
iXLen %4, iXLen 0)
ret <vscale x 1 x i1> %a
}
declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
<vscale x 64 x i1> %2,
iXLen %3, iXLen 0)
ret <vscale x 64 x i1> %a
}

View File

@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
@ -50,7 +50,7 @@ entry:
<vscale x 1 x half> %2,
<vscale x 1 x half> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -80,7 +80,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16:
@ -102,7 +102,7 @@ entry:
<vscale x 2 x half> %2,
<vscale x 2 x half> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -132,7 +132,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16:
@ -154,7 +154,7 @@ entry:
<vscale x 4 x half> %2,
<vscale x 4 x half> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -184,7 +184,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16:
@ -206,7 +206,7 @@ entry:
<vscale x 8 x half> %2,
<vscale x 8 x half> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -236,7 +236,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16:
@ -258,7 +258,7 @@ entry:
<vscale x 16 x half> %2,
<vscale x 16 x half> %3,
<vscale x 16 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -288,7 +288,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32:
@ -310,7 +310,7 @@ entry:
<vscale x 1 x float> %2,
<vscale x 1 x float> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -340,7 +340,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32:
@ -362,7 +362,7 @@ entry:
<vscale x 2 x float> %2,
<vscale x 2 x float> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -392,7 +392,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32:
@ -414,7 +414,7 @@ entry:
<vscale x 4 x float> %2,
<vscale x 4 x float> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -444,7 +444,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32:
@ -466,7 +466,7 @@ entry:
<vscale x 8 x float> %2,
<vscale x 8 x float> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -496,7 +496,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
@ -518,7 +518,7 @@ entry:
<vscale x 1 x double> %2,
<vscale x 1 x double> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -548,7 +548,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
@ -570,7 +570,7 @@ entry:
<vscale x 2 x double> %2,
<vscale x 2 x double> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
@ -622,7 +622,7 @@ entry:
<vscale x 4 x double> %2,
<vscale x 4 x double> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -652,7 +652,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16:
@ -669,7 +669,7 @@ entry:
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -699,7 +699,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16:
@ -716,7 +716,7 @@ entry:
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -746,7 +746,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16:
@ -763,7 +763,7 @@ entry:
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -793,7 +793,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16:
@ -810,7 +810,7 @@ entry:
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -840,7 +840,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16:
@ -857,7 +857,7 @@ entry:
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -887,7 +887,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32:
@ -904,7 +904,7 @@ entry:
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -934,7 +934,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32:
@ -951,7 +951,7 @@ entry:
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -981,7 +981,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32:
@ -998,7 +998,7 @@ entry:
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -1028,7 +1028,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32:
@ -1045,7 +1045,7 @@ entry:
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -1075,7 +1075,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
@ -1092,7 +1092,7 @@ entry:
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -1122,7 +1122,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
@ -1139,7 +1139,7 @@ entry:
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -1169,7 +1169,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
@ -1186,7 +1186,7 @@ entry:
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}

View File

@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
@ -50,7 +50,7 @@ entry:
<vscale x 1 x half> %2,
<vscale x 1 x half> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -80,7 +80,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
@ -102,7 +102,7 @@ entry:
<vscale x 2 x half> %2,
<vscale x 2 x half> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -132,7 +132,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
@ -154,7 +154,7 @@ entry:
<vscale x 4 x half> %2,
<vscale x 4 x half> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -184,7 +184,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
@ -206,7 +206,7 @@ entry:
<vscale x 8 x half> %2,
<vscale x 8 x half> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -236,7 +236,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
@ -258,7 +258,7 @@ entry:
<vscale x 16 x half> %2,
<vscale x 16 x half> %3,
<vscale x 16 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -288,7 +288,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
@ -310,7 +310,7 @@ entry:
<vscale x 1 x float> %2,
<vscale x 1 x float> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -340,7 +340,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
@ -362,7 +362,7 @@ entry:
<vscale x 2 x float> %2,
<vscale x 2 x float> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -392,7 +392,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
@ -414,7 +414,7 @@ entry:
<vscale x 4 x float> %2,
<vscale x 4 x float> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -444,7 +444,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
@ -466,7 +466,7 @@ entry:
<vscale x 8 x float> %2,
<vscale x 8 x float> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -496,7 +496,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
@ -518,7 +518,7 @@ entry:
<vscale x 1 x double> %2,
<vscale x 1 x double> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -548,7 +548,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
@ -570,7 +570,7 @@ entry:
<vscale x 2 x double> %2,
<vscale x 2 x double> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
@ -622,7 +622,7 @@ entry:
<vscale x 4 x double> %2,
<vscale x 4 x double> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -652,7 +652,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
@ -669,7 +669,7 @@ entry:
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -699,7 +699,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
@ -716,7 +716,7 @@ entry:
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -746,7 +746,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
@ -763,7 +763,7 @@ entry:
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -793,7 +793,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
@ -810,7 +810,7 @@ entry:
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -840,7 +840,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
@ -857,7 +857,7 @@ entry:
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -887,7 +887,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
@ -904,7 +904,7 @@ entry:
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -934,7 +934,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
@ -951,7 +951,7 @@ entry:
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -981,7 +981,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
@ -998,7 +998,7 @@ entry:
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -1028,7 +1028,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
@ -1045,7 +1045,7 @@ entry:
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -1075,7 +1075,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
@ -1092,7 +1092,7 @@ entry:
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -1122,7 +1122,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
@ -1139,7 +1139,7 @@ entry:
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -1169,7 +1169,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
@ -1186,7 +1186,7 @@ entry:
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}

View File

@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16:
@ -50,7 +50,7 @@ entry:
<vscale x 1 x half> %2,
<vscale x 1 x half> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -80,7 +80,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16:
@ -102,7 +102,7 @@ entry:
<vscale x 2 x half> %2,
<vscale x 2 x half> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -132,7 +132,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16:
@ -154,7 +154,7 @@ entry:
<vscale x 4 x half> %2,
<vscale x 4 x half> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -184,7 +184,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16:
@ -206,7 +206,7 @@ entry:
<vscale x 8 x half> %2,
<vscale x 8 x half> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -236,7 +236,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16:
@ -258,7 +258,7 @@ entry:
<vscale x 16 x half> %2,
<vscale x 16 x half> %3,
<vscale x 16 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -288,7 +288,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32:
@ -310,7 +310,7 @@ entry:
<vscale x 1 x float> %2,
<vscale x 1 x float> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -340,7 +340,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32:
@ -362,7 +362,7 @@ entry:
<vscale x 2 x float> %2,
<vscale x 2 x float> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -392,7 +392,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32:
@ -414,7 +414,7 @@ entry:
<vscale x 4 x float> %2,
<vscale x 4 x float> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -444,7 +444,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32:
@ -466,7 +466,7 @@ entry:
<vscale x 8 x float> %2,
<vscale x 8 x float> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -496,7 +496,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64:
@ -518,7 +518,7 @@ entry:
<vscale x 1 x double> %2,
<vscale x 1 x double> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -548,7 +548,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64:
@ -570,7 +570,7 @@ entry:
<vscale x 2 x double> %2,
<vscale x 2 x double> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64:
@ -622,7 +622,7 @@ entry:
<vscale x 4 x double> %2,
<vscale x 4 x double> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -652,7 +652,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16:
@ -669,7 +669,7 @@ entry:
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -699,7 +699,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16:
@ -716,7 +716,7 @@ entry:
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -746,7 +746,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16:
@ -763,7 +763,7 @@ entry:
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -793,7 +793,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16:
@ -810,7 +810,7 @@ entry:
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -840,7 +840,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16:
@ -857,7 +857,7 @@ entry:
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -887,7 +887,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32:
@ -904,7 +904,7 @@ entry:
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -934,7 +934,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32:
@ -951,7 +951,7 @@ entry:
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -981,7 +981,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32:
@ -998,7 +998,7 @@ entry:
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -1028,7 +1028,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32:
@ -1045,7 +1045,7 @@ entry:
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -1075,7 +1075,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
@ -1092,7 +1092,7 @@ entry:
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -1122,7 +1122,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
@ -1139,7 +1139,7 @@ entry:
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -1169,7 +1169,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
@ -1186,7 +1186,7 @@ entry:
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}

View File

@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
@ -50,7 +50,7 @@ entry:
<vscale x 1 x half> %2,
<vscale x 1 x half> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -80,7 +80,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
@ -102,7 +102,7 @@ entry:
<vscale x 2 x half> %2,
<vscale x 2 x half> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -132,7 +132,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
@ -154,7 +154,7 @@ entry:
<vscale x 4 x half> %2,
<vscale x 4 x half> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -184,7 +184,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
@ -206,7 +206,7 @@ entry:
<vscale x 8 x half> %2,
<vscale x 8 x half> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -236,7 +236,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
@ -258,7 +258,7 @@ entry:
<vscale x 16 x half> %2,
<vscale x 16 x half> %3,
<vscale x 16 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -288,7 +288,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
@ -310,7 +310,7 @@ entry:
<vscale x 1 x float> %2,
<vscale x 1 x float> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -340,7 +340,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
@ -362,7 +362,7 @@ entry:
<vscale x 2 x float> %2,
<vscale x 2 x float> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -392,7 +392,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
@ -414,7 +414,7 @@ entry:
<vscale x 4 x float> %2,
<vscale x 4 x float> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -444,7 +444,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
@ -466,7 +466,7 @@ entry:
<vscale x 8 x float> %2,
<vscale x 8 x float> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -496,7 +496,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
@ -518,7 +518,7 @@ entry:
<vscale x 1 x double> %2,
<vscale x 1 x double> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -548,7 +548,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
@ -570,7 +570,7 @@ entry:
<vscale x 2 x double> %2,
<vscale x 2 x double> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
@ -622,7 +622,7 @@ entry:
<vscale x 4 x double> %2,
<vscale x 4 x double> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -652,7 +652,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
@ -669,7 +669,7 @@ entry:
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -699,7 +699,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
@ -716,7 +716,7 @@ entry:
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -746,7 +746,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
@ -763,7 +763,7 @@ entry:
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -793,7 +793,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
@ -810,7 +810,7 @@ entry:
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -840,7 +840,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
@ -857,7 +857,7 @@ entry:
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -887,7 +887,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
@ -904,7 +904,7 @@ entry:
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -934,7 +934,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
@ -951,7 +951,7 @@ entry:
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -981,7 +981,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
@ -998,7 +998,7 @@ entry:
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -1028,7 +1028,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
@ -1045,7 +1045,7 @@ entry:
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -1075,7 +1075,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
@ -1092,7 +1092,7 @@ entry:
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -1122,7 +1122,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
@ -1139,7 +1139,7 @@ entry:
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -1169,7 +1169,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
@ -1186,7 +1186,7 @@ entry:
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}

View File

@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16:
@ -50,7 +50,7 @@ entry:
<vscale x 1 x half> %2,
<vscale x 1 x half> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -80,7 +80,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16:
@ -102,7 +102,7 @@ entry:
<vscale x 2 x half> %2,
<vscale x 2 x half> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -132,7 +132,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16:
@ -154,7 +154,7 @@ entry:
<vscale x 4 x half> %2,
<vscale x 4 x half> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -184,7 +184,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16:
@ -206,7 +206,7 @@ entry:
<vscale x 8 x half> %2,
<vscale x 8 x half> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -236,7 +236,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16:
@ -258,7 +258,7 @@ entry:
<vscale x 16 x half> %2,
<vscale x 16 x half> %3,
<vscale x 16 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -288,7 +288,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32:
@ -310,7 +310,7 @@ entry:
<vscale x 1 x float> %2,
<vscale x 1 x float> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -340,7 +340,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32:
@ -362,7 +362,7 @@ entry:
<vscale x 2 x float> %2,
<vscale x 2 x float> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -392,7 +392,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32:
@ -414,7 +414,7 @@ entry:
<vscale x 4 x float> %2,
<vscale x 4 x float> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -444,7 +444,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32:
@ -466,7 +466,7 @@ entry:
<vscale x 8 x float> %2,
<vscale x 8 x float> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -496,7 +496,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
@ -518,7 +518,7 @@ entry:
<vscale x 1 x double> %2,
<vscale x 1 x double> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -548,7 +548,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
@ -570,7 +570,7 @@ entry:
<vscale x 2 x double> %2,
<vscale x 2 x double> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
@ -622,7 +622,7 @@ entry:
<vscale x 4 x double> %2,
<vscale x 4 x double> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -652,7 +652,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16:
@ -669,7 +669,7 @@ entry:
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -699,7 +699,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16:
@ -716,7 +716,7 @@ entry:
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -746,7 +746,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16:
@ -763,7 +763,7 @@ entry:
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -793,7 +793,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16:
@ -810,7 +810,7 @@ entry:
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -840,7 +840,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16:
@ -857,7 +857,7 @@ entry:
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -887,7 +887,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32:
@ -904,7 +904,7 @@ entry:
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -934,7 +934,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32:
@ -951,7 +951,7 @@ entry:
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -981,7 +981,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32:
@ -998,7 +998,7 @@ entry:
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -1028,7 +1028,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32:
@ -1045,7 +1045,7 @@ entry:
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -1075,7 +1075,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
@ -1092,7 +1092,7 @@ entry:
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -1122,7 +1122,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
@ -1139,7 +1139,7 @@ entry:
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -1169,7 +1169,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
@ -1186,7 +1186,7 @@ entry:
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}

View File

@ -28,7 +28,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
<vscale x 1 x half>,
<vscale x 1 x half>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
@ -50,7 +50,7 @@ entry:
<vscale x 1 x half> %2,
<vscale x 1 x half> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -80,7 +80,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
<vscale x 2 x half>,
<vscale x 2 x half>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
@ -102,7 +102,7 @@ entry:
<vscale x 2 x half> %2,
<vscale x 2 x half> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -132,7 +132,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
<vscale x 4 x half>,
<vscale x 4 x half>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
@ -154,7 +154,7 @@ entry:
<vscale x 4 x half> %2,
<vscale x 4 x half> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -184,7 +184,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
<vscale x 8 x half>,
<vscale x 8 x half>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
@ -206,7 +206,7 @@ entry:
<vscale x 8 x half> %2,
<vscale x 8 x half> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -236,7 +236,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
<vscale x 16 x half>,
<vscale x 16 x half>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
@ -258,7 +258,7 @@ entry:
<vscale x 16 x half> %2,
<vscale x 16 x half> %3,
<vscale x 16 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -288,7 +288,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
<vscale x 1 x float>,
<vscale x 1 x float>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
@ -310,7 +310,7 @@ entry:
<vscale x 1 x float> %2,
<vscale x 1 x float> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -340,7 +340,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
<vscale x 2 x float>,
<vscale x 2 x float>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
@ -362,7 +362,7 @@ entry:
<vscale x 2 x float> %2,
<vscale x 2 x float> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -392,7 +392,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
<vscale x 4 x float>,
<vscale x 4 x float>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
@ -414,7 +414,7 @@ entry:
<vscale x 4 x float> %2,
<vscale x 4 x float> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -444,7 +444,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
<vscale x 8 x float>,
<vscale x 8 x float>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
@ -466,7 +466,7 @@ entry:
<vscale x 8 x float> %2,
<vscale x 8 x float> %3,
<vscale x 8 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -496,7 +496,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
<vscale x 1 x double>,
<vscale x 1 x double>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
@ -518,7 +518,7 @@ entry:
<vscale x 1 x double> %2,
<vscale x 1 x double> %3,
<vscale x 1 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -548,7 +548,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
<vscale x 2 x double>,
<vscale x 2 x double>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
@ -570,7 +570,7 @@ entry:
<vscale x 2 x double> %2,
<vscale x 2 x double> %3,
<vscale x 2 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -600,7 +600,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
<vscale x 4 x double>,
<vscale x 4 x double>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
@ -622,7 +622,7 @@ entry:
<vscale x 4 x double> %2,
<vscale x 4 x double> %3,
<vscale x 4 x i1> %mask,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -652,7 +652,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16.f16(
<vscale x 1 x half>,
half,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
@ -669,7 +669,7 @@ entry:
<vscale x 1 x half> %1,
half %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -699,7 +699,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16.f16(
<vscale x 2 x half>,
half,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
@ -716,7 +716,7 @@ entry:
<vscale x 2 x half> %1,
half %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -746,7 +746,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16.f16(
<vscale x 4 x half>,
half,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
@ -763,7 +763,7 @@ entry:
<vscale x 4 x half> %1,
half %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -793,7 +793,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16.f16(
<vscale x 8 x half>,
half,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
@ -810,7 +810,7 @@ entry:
<vscale x 8 x half> %1,
half %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -840,7 +840,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16.f16(
<vscale x 16 x half>,
half,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
@ -857,7 +857,7 @@ entry:
<vscale x 16 x half> %1,
half %2,
<vscale x 16 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -887,7 +887,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32(
<vscale x 1 x float>,
float,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
@ -904,7 +904,7 @@ entry:
<vscale x 1 x float> %1,
float %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -934,7 +934,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32.f32(
<vscale x 2 x float>,
float,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
@ -951,7 +951,7 @@ entry:
<vscale x 2 x float> %1,
float %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -981,7 +981,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32.f32(
<vscale x 4 x float>,
float,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
@ -998,7 +998,7 @@ entry:
<vscale x 4 x float> %1,
float %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -1028,7 +1028,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32.f32(
<vscale x 8 x float>,
float,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
@ -1045,7 +1045,7 @@ entry:
<vscale x 8 x float> %1,
float %2,
<vscale x 8 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -1075,7 +1075,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64.f64(
<vscale x 1 x double>,
double,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
@ -1092,7 +1092,7 @@ entry:
<vscale x 1 x double> %1,
double %2,
<vscale x 1 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -1122,7 +1122,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64.f64(
<vscale x 2 x double>,
double,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
@ -1139,7 +1139,7 @@ entry:
<vscale x 2 x double> %1,
double %2,
<vscale x 2 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -1169,7 +1169,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64.f64(
<vscale x 4 x double>,
double,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
@ -1186,7 +1186,7 @@ entry:
<vscale x 4 x double> %1,
double %2,
<vscale x 4 x i1> %3,
iXLen %4)
iXLen %4, iXLen 1)
ret <vscale x 4 x i1> %a
}

View File

@ -18,6 +18,7 @@ entry:
%a = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(
<vscale x 1 x i1> %0,
iXLen %1)
ret <vscale x 1 x i1> %a
}
@ -25,13 +26,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsbf.mask.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -41,7 +42,8 @@ entry:
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
<vscale x 1 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -60,6 +62,7 @@ entry:
%a = call <vscale x 2 x i1> @llvm.riscv.vmsbf.nxv2i1(
<vscale x 2 x i1> %0,
iXLen %1)
ret <vscale x 2 x i1> %a
}
@ -67,13 +70,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsbf.mask.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -83,7 +86,8 @@ entry:
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
<vscale x 2 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -102,6 +106,7 @@ entry:
%a = call <vscale x 4 x i1> @llvm.riscv.vmsbf.nxv4i1(
<vscale x 4 x i1> %0,
iXLen %1)
ret <vscale x 4 x i1> %a
}
@ -109,13 +114,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsbf.mask.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -125,7 +130,8 @@ entry:
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
<vscale x 4 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -144,6 +150,7 @@ entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.nxv8i1(
<vscale x 8 x i1> %0,
iXLen %1)
ret <vscale x 8 x i1> %a
}
@ -151,23 +158,24 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsbf.mask.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
<vscale x 8 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -186,6 +194,7 @@ entry:
%a = call <vscale x 16 x i1> @llvm.riscv.vmsbf.nxv16i1(
<vscale x 16 x i1> %0,
iXLen %1)
ret <vscale x 16 x i1> %a
}
@ -193,13 +202,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsbf.mask.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -209,7 +218,8 @@ entry:
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
<vscale x 16 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -228,6 +238,7 @@ entry:
%a = call <vscale x 32 x i1> @llvm.riscv.vmsbf.nxv32i1(
<vscale x 32 x i1> %0,
iXLen %1)
ret <vscale x 32 x i1> %a
}
@ -235,13 +246,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsbf.mask.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
<vscale x 32 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -251,7 +262,8 @@ entry:
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
<vscale x 32 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 32 x i1> %a
}
@ -270,6 +282,7 @@ entry:
%a = call <vscale x 64 x i1> @llvm.riscv.vmsbf.nxv64i1(
<vscale x 64 x i1> %0,
iXLen %1)
ret <vscale x 64 x i1> %a
}
@ -277,13 +290,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsbf.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -293,6 +306,7 @@ entry:
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
<vscale x 64 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 64 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -963,7 +963,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
@ -980,7 +980,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1011,7 +1011,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
@ -1028,7 +1028,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1059,7 +1059,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
@ -1076,7 +1076,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1107,7 +1107,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
@ -1124,7 +1124,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1155,7 +1155,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
@ -1172,7 +1172,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1203,7 +1203,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
@ -1220,7 +1220,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1251,7 +1251,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
@ -1268,7 +1268,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1299,7 +1299,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
@ -1316,7 +1316,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1347,7 +1347,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
@ -1364,7 +1364,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1395,7 +1395,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
@ -1412,7 +1412,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1443,7 +1443,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
@ -1460,7 +1460,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1491,7 +1491,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
@ -1508,7 +1508,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1539,7 +1539,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
@ -1556,7 +1556,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1587,7 +1587,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
@ -1604,7 +1604,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1635,7 +1635,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
@ -1652,7 +1652,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1688,7 +1688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
@ -1711,7 +1711,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1747,7 +1747,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
@ -1770,7 +1770,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1806,7 +1806,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
@ -1829,7 +1829,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1864,7 +1864,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1899,7 +1899,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1934,7 +1934,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1969,7 +1969,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2004,7 +2004,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2039,7 +2039,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2074,7 +2074,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2109,7 +2109,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2144,7 +2144,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2179,7 +2179,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2214,7 +2214,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2249,7 +2249,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2284,7 +2284,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2319,7 +2319,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2354,7 +2354,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2389,7 +2389,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2424,7 +2424,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2459,7 +2459,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2478,7 +2478,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2496,7 +2496,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2514,7 +2514,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2532,7 +2532,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2550,7 +2550,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2568,7 +2568,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2586,7 +2586,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2604,7 +2604,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2622,7 +2622,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2640,7 +2640,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2658,7 +2658,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2676,7 +2676,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2694,7 +2694,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2712,7 +2712,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2730,7 +2730,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2753,7 +2753,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2778,7 +2778,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2803,7 +2803,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -963,7 +963,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
@ -980,7 +980,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1011,7 +1011,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
@ -1028,7 +1028,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1059,7 +1059,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
@ -1076,7 +1076,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1107,7 +1107,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
@ -1124,7 +1124,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1155,7 +1155,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
@ -1172,7 +1172,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1203,7 +1203,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
@ -1220,7 +1220,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1251,7 +1251,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
@ -1268,7 +1268,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1299,7 +1299,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
@ -1316,7 +1316,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1347,7 +1347,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
@ -1364,7 +1364,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1395,7 +1395,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
@ -1412,7 +1412,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1443,7 +1443,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
@ -1460,7 +1460,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1491,7 +1491,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
@ -1508,7 +1508,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1539,7 +1539,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
@ -1556,7 +1556,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1587,7 +1587,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
@ -1604,7 +1604,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1635,7 +1635,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
@ -1652,7 +1652,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1683,7 +1683,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
@ -1700,7 +1700,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1731,7 +1731,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
@ -1748,7 +1748,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1779,7 +1779,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
@ -1796,7 +1796,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1831,7 +1831,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1866,7 +1866,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1901,7 +1901,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1936,7 +1936,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1971,7 +1971,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2006,7 +2006,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2041,7 +2041,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2076,7 +2076,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2111,7 +2111,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2146,7 +2146,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2181,7 +2181,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2216,7 +2216,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2251,7 +2251,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2286,7 +2286,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2321,7 +2321,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2356,7 +2356,7 @@ entry:
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2391,7 +2391,7 @@ entry:
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2426,7 +2426,7 @@ entry:
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2445,7 +2445,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2463,7 +2463,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2481,7 +2481,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2499,7 +2499,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2517,7 +2517,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2535,7 +2535,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2553,7 +2553,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2571,7 +2571,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2589,7 +2589,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2607,7 +2607,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2625,7 +2625,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2643,7 +2643,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2661,7 +2661,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2679,7 +2679,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2697,7 +2697,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2715,7 +2715,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2733,7 +2733,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2751,7 +2751,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -963,7 +963,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
@ -980,7 +980,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1011,7 +1011,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
@ -1028,7 +1028,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1059,7 +1059,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
@ -1076,7 +1076,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1107,7 +1107,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
@ -1124,7 +1124,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1155,7 +1155,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
@ -1172,7 +1172,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1203,7 +1203,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
@ -1220,7 +1220,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1251,7 +1251,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
@ -1268,7 +1268,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1299,7 +1299,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
@ -1316,7 +1316,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1347,7 +1347,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
@ -1364,7 +1364,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1395,7 +1395,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
@ -1412,7 +1412,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1443,7 +1443,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
@ -1460,7 +1460,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1491,7 +1491,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
@ -1508,7 +1508,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1539,7 +1539,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
@ -1556,7 +1556,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1587,7 +1587,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
@ -1604,7 +1604,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1635,7 +1635,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
@ -1652,7 +1652,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1688,7 +1688,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
@ -1711,7 +1711,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1747,7 +1747,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
@ -1770,7 +1770,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1806,7 +1806,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
@ -1829,7 +1829,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1864,7 +1864,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1899,7 +1899,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1934,7 +1934,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1969,7 +1969,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2004,7 +2004,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2039,7 +2039,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2074,7 +2074,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2106,7 +2106,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2121,7 +2121,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %0,
i32 %2)
i32 %2, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2156,7 +2156,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2191,7 +2191,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2226,7 +2226,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2261,7 +2261,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2296,7 +2296,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2331,7 +2331,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2366,7 +2366,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2401,7 +2401,7 @@ entry:
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2436,7 +2436,7 @@ entry:
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2471,7 +2471,7 @@ entry:
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2490,7 +2490,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2508,7 +2508,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2526,7 +2526,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2544,7 +2544,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2562,7 +2562,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2580,7 +2580,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2598,7 +2598,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2616,7 +2616,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2634,7 +2634,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2652,7 +2652,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2670,7 +2670,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2688,7 +2688,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2706,7 +2706,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2724,7 +2724,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2742,7 +2742,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2765,7 +2765,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2790,7 +2790,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2815,7 +2815,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %0,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -963,7 +963,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
@ -980,7 +980,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1011,7 +1011,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
@ -1028,7 +1028,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1059,7 +1059,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
@ -1076,7 +1076,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1107,7 +1107,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
@ -1124,7 +1124,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1155,7 +1155,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
@ -1172,7 +1172,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1203,7 +1203,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
@ -1220,7 +1220,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1251,7 +1251,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
@ -1268,7 +1268,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1299,7 +1299,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
@ -1316,7 +1316,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1347,7 +1347,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
@ -1364,7 +1364,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1395,7 +1395,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
@ -1412,7 +1412,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1443,7 +1443,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
@ -1460,7 +1460,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1491,7 +1491,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
@ -1508,7 +1508,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1539,7 +1539,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
@ -1556,7 +1556,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1587,7 +1587,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
@ -1604,7 +1604,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1635,7 +1635,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
@ -1652,7 +1652,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1683,7 +1683,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
@ -1700,7 +1700,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1731,7 +1731,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
@ -1748,7 +1748,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1779,7 +1779,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
@ -1796,7 +1796,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1831,7 +1831,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1866,7 +1866,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1901,7 +1901,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1936,7 +1936,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1971,7 +1971,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2006,7 +2006,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2041,7 +2041,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2073,7 +2073,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2103,7 +2103,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %0,
i64 %2)
i64 %2, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2123,7 +2123,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2158,7 +2158,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2193,7 +2193,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2228,7 +2228,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2263,7 +2263,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2298,7 +2298,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2333,7 +2333,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2368,7 +2368,7 @@ entry:
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2403,7 +2403,7 @@ entry:
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2438,7 +2438,7 @@ entry:
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2457,7 +2457,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2475,7 +2475,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2493,7 +2493,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2511,7 +2511,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2529,7 +2529,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2547,7 +2547,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2565,7 +2565,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2583,7 +2583,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2601,7 +2601,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2619,7 +2619,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2637,7 +2637,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2655,7 +2655,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2673,7 +2673,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2691,7 +2691,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2709,7 +2709,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2727,7 +2727,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2745,7 +2745,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2763,7 +2763,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %0,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -25,13 +25,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsif.mask.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -41,7 +41,7 @@ entry:
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
<vscale x 1 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -67,13 +67,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsif.mask.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -83,7 +83,7 @@ entry:
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
<vscale x 2 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -109,13 +109,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsif.mask.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -125,7 +125,7 @@ entry:
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
<vscale x 4 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -151,23 +151,23 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsif.mask.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
<vscale x 8 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -193,13 +193,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsif.mask.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -209,7 +209,7 @@ entry:
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
<vscale x 16 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -235,13 +235,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsif.mask.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
<vscale x 32 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -251,7 +251,7 @@ entry:
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
<vscale x 32 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 32 x i1> %a
}
@ -277,13 +277,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsif.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -293,6 +293,6 @@ entry:
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
<vscale x 64 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 64 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 -14,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 -12,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 -10,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 -8,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 -6,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 -4,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 -2,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 0,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 1,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 3,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 5,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 7,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 11,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 13,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 15,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 -15,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 -13,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i32);
i32, i32);
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i32);
i32, i32);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i32);
i32, i32);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1673,7 +1673,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i32);
i32, i32);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
@ -1696,7 +1696,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1732,7 +1732,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i32);
i32, i32);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
@ -1755,7 +1755,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1791,7 +1791,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i32);
i32, i32);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
@ -1814,7 +1814,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i32 %4)
i32 %4, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1849,7 +1849,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -1884,7 +1884,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -1919,7 +1919,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -1954,7 +1954,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -1989,7 +1989,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2024,7 +2024,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 32 x i1> %a
}
@ -2059,7 +2059,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2094,7 +2094,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2129,7 +2129,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2164,7 +2164,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2199,7 +2199,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 16 x i1> %a
}
@ -2234,7 +2234,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2269,7 +2269,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2304,7 +2304,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}
@ -2339,7 +2339,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 8 x i1> %a
}
@ -2374,7 +2374,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 1 x i1> %a
}
@ -2409,7 +2409,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 2 x i1> %a
}
@ -2444,7 +2444,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i32 %3)
i32 %3, i32 1)
ret <vscale x 4 x i1> %a
}

View File

@ -26,7 +26,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
<vscale x 1 x i8>,
<vscale x 1 x i8>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
@ -48,7 +48,7 @@ entry:
<vscale x 1 x i8> %2,
<vscale x 1 x i8> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -78,7 +78,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
<vscale x 2 x i8>,
<vscale x 2 x i8>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
@ -100,7 +100,7 @@ entry:
<vscale x 2 x i8> %2,
<vscale x 2 x i8> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -130,7 +130,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
<vscale x 4 x i8>,
<vscale x 4 x i8>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
@ -152,7 +152,7 @@ entry:
<vscale x 4 x i8> %2,
<vscale x 4 x i8> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -182,7 +182,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
<vscale x 8 x i8>,
<vscale x 8 x i8>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
@ -204,7 +204,7 @@ entry:
<vscale x 8 x i8> %2,
<vscale x 8 x i8> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -234,7 +234,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
<vscale x 16 x i8>,
<vscale x 16 x i8>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
@ -256,7 +256,7 @@ entry:
<vscale x 16 x i8> %2,
<vscale x 16 x i8> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -286,7 +286,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
<vscale x 32 x i8>,
<vscale x 32 x i8>,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
@ -308,7 +308,7 @@ entry:
<vscale x 32 x i8> %2,
<vscale x 32 x i8> %3,
<vscale x 32 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -338,7 +338,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
<vscale x 1 x i16>,
<vscale x 1 x i16>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
@ -360,7 +360,7 @@ entry:
<vscale x 1 x i16> %2,
<vscale x 1 x i16> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -390,7 +390,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
<vscale x 2 x i16>,
<vscale x 2 x i16>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
@ -412,7 +412,7 @@ entry:
<vscale x 2 x i16> %2,
<vscale x 2 x i16> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -442,7 +442,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
<vscale x 4 x i16>,
<vscale x 4 x i16>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
@ -464,7 +464,7 @@ entry:
<vscale x 4 x i16> %2,
<vscale x 4 x i16> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -494,7 +494,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
<vscale x 8 x i16>,
<vscale x 8 x i16>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
@ -516,7 +516,7 @@ entry:
<vscale x 8 x i16> %2,
<vscale x 8 x i16> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -546,7 +546,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
<vscale x 16 x i16>,
<vscale x 16 x i16>,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
@ -568,7 +568,7 @@ entry:
<vscale x 16 x i16> %2,
<vscale x 16 x i16> %3,
<vscale x 16 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -598,7 +598,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
<vscale x 1 x i32>,
<vscale x 1 x i32>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
@ -620,7 +620,7 @@ entry:
<vscale x 1 x i32> %2,
<vscale x 1 x i32> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -650,7 +650,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
<vscale x 2 x i32>,
<vscale x 2 x i32>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
@ -672,7 +672,7 @@ entry:
<vscale x 2 x i32> %2,
<vscale x 2 x i32> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -702,7 +702,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
<vscale x 4 x i32>,
<vscale x 4 x i32>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
@ -724,7 +724,7 @@ entry:
<vscale x 4 x i32> %2,
<vscale x 4 x i32> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -754,7 +754,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
<vscale x 8 x i32>,
<vscale x 8 x i32>,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
@ -776,7 +776,7 @@ entry:
<vscale x 8 x i32> %2,
<vscale x 8 x i32> %3,
<vscale x 8 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -806,7 +806,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
@ -828,7 +828,7 @@ entry:
<vscale x 1 x i64> %2,
<vscale x 1 x i64> %3,
<vscale x 1 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -858,7 +858,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
<vscale x 2 x i64>,
<vscale x 2 x i64>,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
@ -880,7 +880,7 @@ entry:
<vscale x 2 x i64> %2,
<vscale x 2 x i64> %3,
<vscale x 2 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -910,7 +910,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
<vscale x 4 x i64>,
<vscale x 4 x i64>,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
@ -932,7 +932,7 @@ entry:
<vscale x 4 x i64> %2,
<vscale x 4 x i64> %3,
<vscale x 4 x i1> %mask,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -962,7 +962,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8.i8(
<vscale x 1 x i8>,
i8,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
@ -979,7 +979,7 @@ entry:
<vscale x 1 x i8> %1,
i8 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1009,7 +1009,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8.i8(
<vscale x 2 x i8>,
i8,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
@ -1026,7 +1026,7 @@ entry:
<vscale x 2 x i8> %1,
i8 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1056,7 +1056,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8.i8(
<vscale x 4 x i8>,
i8,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
@ -1073,7 +1073,7 @@ entry:
<vscale x 4 x i8> %1,
i8 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1103,7 +1103,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8.i8(
<vscale x 8 x i8>,
i8,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
@ -1120,7 +1120,7 @@ entry:
<vscale x 8 x i8> %1,
i8 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1150,7 +1150,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8.i8(
<vscale x 16 x i8>,
i8,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
@ -1167,7 +1167,7 @@ entry:
<vscale x 16 x i8> %1,
i8 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1197,7 +1197,7 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8.i8(
<vscale x 32 x i8>,
i8,
<vscale x 32 x i1>,
i64);
i64, i64);
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
@ -1214,7 +1214,7 @@ entry:
<vscale x 32 x i8> %1,
i8 %2,
<vscale x 32 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 32 x i1> %a
}
@ -1244,7 +1244,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16.i16(
<vscale x 1 x i16>,
i16,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
@ -1261,7 +1261,7 @@ entry:
<vscale x 1 x i16> %1,
i16 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1291,7 +1291,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16.i16(
<vscale x 2 x i16>,
i16,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
@ -1308,7 +1308,7 @@ entry:
<vscale x 2 x i16> %1,
i16 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1338,7 +1338,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16.i16(
<vscale x 4 x i16>,
i16,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
@ -1355,7 +1355,7 @@ entry:
<vscale x 4 x i16> %1,
i16 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1385,7 +1385,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16.i16(
<vscale x 8 x i16>,
i16,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
@ -1402,7 +1402,7 @@ entry:
<vscale x 8 x i16> %1,
i16 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1432,7 +1432,7 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16.i16(
<vscale x 16 x i16>,
i16,
<vscale x 16 x i1>,
i64);
i64, i64);
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
@ -1449,7 +1449,7 @@ entry:
<vscale x 16 x i16> %1,
i16 %2,
<vscale x 16 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1479,7 +1479,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32(
<vscale x 1 x i32>,
i32,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
@ -1496,7 +1496,7 @@ entry:
<vscale x 1 x i32> %1,
i32 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1526,7 +1526,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32.i32(
<vscale x 2 x i32>,
i32,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
@ -1543,7 +1543,7 @@ entry:
<vscale x 2 x i32> %1,
i32 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1573,7 +1573,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32.i32(
<vscale x 4 x i32>,
i32,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
@ -1590,7 +1590,7 @@ entry:
<vscale x 4 x i32> %1,
i32 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1620,7 +1620,7 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32.i32(
<vscale x 8 x i32>,
i32,
<vscale x 8 x i1>,
i64);
i64, i64);
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
@ -1637,7 +1637,7 @@ entry:
<vscale x 8 x i32> %1,
i32 %2,
<vscale x 8 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1667,7 +1667,7 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64.i64(
<vscale x 1 x i64>,
i64,
<vscale x 1 x i1>,
i64);
i64, i64);
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
@ -1684,7 +1684,7 @@ entry:
<vscale x 1 x i64> %1,
i64 %2,
<vscale x 1 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1714,7 +1714,7 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64.i64(
<vscale x 2 x i64>,
i64,
<vscale x 2 x i1>,
i64);
i64, i64);
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
@ -1731,7 +1731,7 @@ entry:
<vscale x 2 x i64> %1,
i64 %2,
<vscale x 2 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1761,7 +1761,7 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64.i64(
<vscale x 4 x i64>,
i64,
<vscale x 4 x i1>,
i64);
i64, i64);
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
@ -1778,7 +1778,7 @@ entry:
<vscale x 4 x i64> %1,
i64 %2,
<vscale x 4 x i1> %3,
i64 %4)
i64 %4, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1813,7 +1813,7 @@ entry:
<vscale x 1 x i8> %1,
i8 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -1848,7 +1848,7 @@ entry:
<vscale x 2 x i8> %1,
i8 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -1883,7 +1883,7 @@ entry:
<vscale x 4 x i8> %1,
i8 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -1918,7 +1918,7 @@ entry:
<vscale x 8 x i8> %1,
i8 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -1953,7 +1953,7 @@ entry:
<vscale x 16 x i8> %1,
i8 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -1988,7 +1988,7 @@ entry:
<vscale x 32 x i8> %1,
i8 9,
<vscale x 32 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 32 x i1> %a
}
@ -2023,7 +2023,7 @@ entry:
<vscale x 1 x i16> %1,
i16 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2058,7 +2058,7 @@ entry:
<vscale x 2 x i16> %1,
i16 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2093,7 +2093,7 @@ entry:
<vscale x 4 x i16> %1,
i16 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2128,7 +2128,7 @@ entry:
<vscale x 8 x i16> %1,
i16 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2163,7 +2163,7 @@ entry:
<vscale x 16 x i16> %1,
i16 9,
<vscale x 16 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 16 x i1> %a
}
@ -2198,7 +2198,7 @@ entry:
<vscale x 1 x i32> %1,
i32 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2233,7 +2233,7 @@ entry:
<vscale x 2 x i32> %1,
i32 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2268,7 +2268,7 @@ entry:
<vscale x 4 x i32> %1,
i32 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}
@ -2303,7 +2303,7 @@ entry:
<vscale x 8 x i32> %1,
i32 9,
<vscale x 8 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 8 x i1> %a
}
@ -2338,7 +2338,7 @@ entry:
<vscale x 1 x i64> %1,
i64 9,
<vscale x 1 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 1 x i1> %a
}
@ -2373,7 +2373,7 @@ entry:
<vscale x 2 x i64> %1,
i64 9,
<vscale x 2 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 2 x i1> %a
}
@ -2408,7 +2408,7 @@ entry:
<vscale x 4 x i64> %1,
i64 9,
<vscale x 4 x i1> %2,
i64 %3)
i64 %3, i64 1)
ret <vscale x 4 x i1> %a
}

View File

@ -25,13 +25,13 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsof.mask.nxv1i1(
<vscale x 1 x i1>,
<vscale x 1 x i1>,
<vscale x 1 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -41,7 +41,7 @@ entry:
<vscale x 1 x i1> %0,
<vscale x 1 x i1> %1,
<vscale x 1 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 1 x i1> %a
}
@ -67,13 +67,13 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsof.mask.nxv2i1(
<vscale x 2 x i1>,
<vscale x 2 x i1>,
<vscale x 2 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x i1> %1, <vscale x 2 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -83,7 +83,7 @@ entry:
<vscale x 2 x i1> %0,
<vscale x 2 x i1> %1,
<vscale x 2 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 2 x i1> %a
}
@ -109,13 +109,13 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsof.mask.nxv4i1(
<vscale x 4 x i1>,
<vscale x 4 x i1>,
<vscale x 4 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x i1> %1, <vscale x 4 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -125,7 +125,7 @@ entry:
<vscale x 4 x i1> %0,
<vscale x 4 x i1> %1,
<vscale x 4 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 4 x i1> %a
}
@ -151,23 +151,23 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
<vscale x 8 x i1>,
<vscale x 8 x i1>,
<vscale x 8 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x i1> %1, <vscale x 8 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
entry:
%a = call <vscale x 8 x i1> @llvm.riscv.vmsof.mask.nxv8i1(
<vscale x 8 x i1> %0,
<vscale x 8 x i1> %1,
<vscale x 8 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 8 x i1> %a
}
@ -193,13 +193,13 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsof.mask.nxv16i1(
<vscale x 16 x i1>,
<vscale x 16 x i1>,
<vscale x 16 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16 x i1> %1, <vscale x 16 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -209,7 +209,7 @@ entry:
<vscale x 16 x i1> %0,
<vscale x 16 x i1> %1,
<vscale x 16 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 16 x i1> %a
}
@ -235,13 +235,13 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsof.mask.nxv32i1(
<vscale x 32 x i1>,
<vscale x 32 x i1>,
<vscale x 32 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32 x i1> %1, <vscale x 32 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -251,7 +251,7 @@ entry:
<vscale x 32 x i1> %0,
<vscale x 32 x i1> %1,
<vscale x 32 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 32 x i1> %a
}
@ -277,13 +277,13 @@ declare <vscale x 64 x i1> @llvm.riscv.vmsof.mask.nxv64i1(
<vscale x 64 x i1>,
<vscale x 64 x i1>,
<vscale x 64 x i1>,
iXLen);
iXLen, iXLen);
define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64 x i1> %1, <vscale x 64 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
@ -293,6 +293,6 @@ entry:
<vscale x 64 x i1> %0,
<vscale x 64 x i1> %1,
<vscale x 64 x i1> %2,
iXLen %3)
iXLen %3, iXLen 1)
ret <vscale x 64 x i1> %a
}