[RISCV][Clang] Support policy functions for Vector Comparison Instructions.

We will switch all UndefValue to PoisonValue in follow-up patches.

Reviewed By: kito-cheng

Differential Revision: https://reviews.llvm.org/D126746
commit 9caf2cc05c (parent 7eddeb9e99)
Zakk Chen, 2022-08-02 16:27:29 +00:00
25 changed files with 1300 additions and 2 deletions
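
For context, a minimal usage sketch of the new policy variants (not part of the patch; the intrinsic signatures follow the tests below, and the wrapper function names are ours). The _ma (mask-agnostic) form drops the maskedoff operand and leaves masked-off result bits unspecified; the _mu (mask-undisturbed) form takes an explicit merge operand that supplies them.

#include <riscv_vector.h>

// Mask-agnostic comparison: masked-off bits of the result are unspecified
// (undef today; poison after the planned follow-up).
vbool64_t cmp_ma(vbool64_t mask, vfloat32mf2_t a, vfloat32mf2_t b,
                 size_t vl) {
  return vmfeq_ma(mask, a, b, vl);
}

// Mask-undisturbed comparison: masked-off bits are taken from 'merge'.
vbool64_t cmp_mu(vbool64_t mask, vbool64_t merge, vfloat32mf2_t a,
                 vfloat32mf2_t b, size_t vl) {
  return vmfeq_mu(mask, merge, a, b, vl);
}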

@@ -1745,7 +1745,8 @@ defm vncvt_x_x_w : RVVPseudoVNCVTBuiltin<"vnsrl", "vncvt_x", "csi",
["Uv", "UvUw"]]>;
// 12.8. Vector Integer Comparison Instructions
-let MaskedPolicyScheme = NonePolicy in {
+let MaskedPolicyScheme = HasPassthruOperand,
+    HasTailPolicy = false in {
defm vmseq : RVVIntMaskOutBuiltinSet;
defm vmsne : RVVIntMaskOutBuiltinSet;
defm vmsltu : RVVUnsignedMaskOutBuiltinSet;
@@ -1950,7 +1951,8 @@ defm vfneg_v : RVVPseudoVFUnaryBuiltin<"vfsgnjn", "xfd">;
defm vfabs_v : RVVPseudoVFUnaryBuiltin<"vfsgnjx", "xfd">;
// 14.13. Vector Floating-Point Compare Instructions
-let MaskedPolicyScheme = NonePolicy in {
+let MaskedPolicyScheme = HasPassthruOperand,
+    HasTailPolicy = false in {
defm vmfeq : RVVFloatingMaskOutBuiltinSet;
defm vmfne : RVVFloatingMaskOutBuiltinSet;
defm vmflt : RVVFloatingMaskOutBuiltinSet;
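
In effect (a sketch inferred from the tests below, not a full description of the TableGen backend): HasPassthruOperand gives each masked comparison a passthru operand and the _ma/_mu policy variants, while HasTailPolicy = false suppresses tail-policy variants, since comparisons write mask registers and have no tail elements to police. The generated prototypes look like:

// Assumed shape of the generated declarations, mirroring the tests below.
vbool64_t vmseq_vv_i32mf2_b64_ma(vbool64_t mask, vint32mf2_t op1,
                                 vint32mf2_t op2, size_t vl);
vbool64_t vmseq_vv_i32mf2_b64_mu(vbool64_t mask, vbool64_t merge,
                                 vint32mf2_t op1, vint32mf2_t op2, size_t vl);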

@@ -361,3 +361,39 @@ vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmfeq(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfeq_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfeq_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfeq_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfeq_mu(mask, merge, op1, op2, vl);
}

@@ -329,3 +329,39 @@ vbool8_t test_vmfge_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
return vmfge(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfge_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfge_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfge_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfge_mu(mask, merge, op1, op2, vl);
}

@@ -329,3 +329,39 @@ vbool8_t test_vmfgt_vf_f64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vfloat64m8
return vmfgt(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfgt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfgt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfgt_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfgt_mu(mask, merge, op1, op2, vl);
}

@@ -361,3 +361,39 @@ vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmfle(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfle_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfle_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfle_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfle_mu(mask, merge, op1, op2, vl);
}

@@ -361,3 +361,39 @@ vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmflt(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmflt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmflt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmflt_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmflt_mu(mask, merge, op1, op2, vl);
}

@@ -361,3 +361,39 @@ vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vfloat64m8_t op1, double op2, size_t vl) {
return vmfne(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfne_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfne_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfne_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfne_mu(mask, merge, op1, op2, vl);
}

@@ -1699,3 +1699,75 @@ vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmseq(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmseq_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmseq_mu(mask, merge, op1, op2, vl);
}

@@ -1588,3 +1588,75 @@ vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
return vmsgeu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsge_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsge_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgeu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgeu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsge_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsge_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgeu_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgeu_mu(mask, merge, op1, op2, vl);
}

@@ -1588,3 +1588,75 @@ vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
return vmsgtu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsgt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsgt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgtu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgtu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsgt_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsgt_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgtu_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgtu_mu(mask, merge, op1, op2, vl);
}

@@ -1714,3 +1714,75 @@ vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsleu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsle_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsle_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsleu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsleu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsle_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsle_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsleu_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsleu_mu(mask, merge, op1, op2, vl);
}

@@ -1714,3 +1714,75 @@ vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsltu(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmslt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmslt_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsltu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsltu_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmslt_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmslt_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsltu_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsltu_mu(mask, merge, op1, op2, vl);
}

@@ -1699,3 +1699,75 @@ vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsne(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsne_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsne_mu(mask, merge, op1, op2, vl);
}

@@ -578,3 +578,39 @@ vbool2_t test_vmfeq_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
vbool2_t test_vmfeq_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
return vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfeq_vv_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfeq_vf_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfeq_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfeq_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfeq_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfeq_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@@ -545,3 +545,39 @@ vbool2_t test_vmfge_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
vbool2_t test_vmfge_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
return vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfge_vv_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfge_vf_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfge_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfge_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfge_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfge_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@@ -545,3 +545,39 @@ vbool2_t test_vmfgt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
vbool2_t test_vmfgt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
return vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfgt_vv_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfgt_vf_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfgt_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfgt_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfgt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfgt_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@@ -578,3 +578,39 @@ vbool2_t test_vmfle_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
vbool2_t test_vmfle_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
return vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfle_vv_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfle_vf_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfle_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfle_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfle_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfle_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@@ -578,3 +578,39 @@ vbool2_t test_vmflt_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
vbool2_t test_vmflt_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
return vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmflt_vv_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmflt_vf_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmflt_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmflt_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmflt_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmflt_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@@ -578,3 +578,39 @@ vbool2_t test_vmfne_vv_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8
vbool2_t test_vmfne_vf_f16m8_b2_m (vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
return vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfne_vv_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> undef, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_ma (vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfne_vf_f32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vv_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vv_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
return vmfne_vv_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmfne_vf_f32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32.f32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmfne_vf_f32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vfloat32mf2_t op1, float op2, size_t vl) {
return vmfne_vf_f32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@@ -1699,3 +1699,75 @@ vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmseq_vv_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmseq_vx_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmseq_vv_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmseq_vx_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmseq_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmseq_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmseq_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmseq_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmseq_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmseq_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
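// Integer equality is sign-agnostic, so the signed (i32) and unsigned (u32)
// tests above lower to the same llvm.riscv.vmseq.mask intrinsic; only the
// C-level operand types differ.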

@ -1588,3 +1588,75 @@ vbool8_t test_vmsgeu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
return vmsgeu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsge_vv_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsge_vx_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgeu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgeu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsge_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsge_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsge_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsge_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgeu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgeu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgeu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgeu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
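// Note that vmsge/vmsgeu have no dedicated .vv encoding in the RVV ISA; the
// llvm.riscv.vmsge{u}.mask intrinsics checked above stay symmetric with the
// other compares at the IR level, and the backend expands them later (for
// example, vmsge.vx is typically synthesized from vmslt.vx plus a mask
// complement).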

@ -1588,3 +1588,75 @@ vbool8_t test_vmsgtu_vx_u64m8_b8_m (vbool8_t mask, vbool8_t maskedoff, vuint64m8
return vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsgt_vv_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsgt_vx_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgtu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgtu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsgt_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgt_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsgt_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsgtu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsgtu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsgtu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsgtu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
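// Similarly, only vmsgt{u}.vx and vmsgt{u}.vi exist as encodings; the .vv
// builtins above are expected to be selected as vmslt{u}.vv with the
// operands swapped.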

@ -1714,3 +1714,75 @@ vbool8_t test_vmsleu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsleu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsle_vv_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsle_vx_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsleu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsleu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsle_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsle_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsle_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsle_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsleu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsleu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsleu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsleu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@ -1714,3 +1714,75 @@ vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmslt_vv_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmslt_vx_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsltu_vv_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsltu_vx_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmslt_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmslt_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmslt_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmslt_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsltu_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsltu_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsltu_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsltu_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}

@ -1699,3 +1699,75 @@ vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff,
vuint64m8_t op1, uint64_t op2, size_t vl) {
return vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsne_vv_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_ma (vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsne_vx_i32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsne_vv_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_ma(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_ma (vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsne_vx_u32mf2_b64_ma(mask, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
return vmsne_vv_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_i32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_i32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
return vmsne_vx_i32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vv_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vv_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
return vmsne_vv_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
// CHECK-RV64-LABEL: @test_vmsne_vx_u32mf2_b64_mu(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32.i32.i64(<vscale x 1 x i1> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i1> [[TMP0]]
//
vbool64_t test_vmsne_vx_u32mf2_b64_mu (vbool64_t mask, vbool64_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
return vmsne_vx_u32mf2_b64_mu(mask, merge, op1, op2, vl);
}
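// A minimal usage sketch of the two policies (illustrative only; the helper
// names below are hypothetical and not part of the generated tests):

#include <riscv_vector.h>

// Mask-undisturbed: result bits at inactive positions keep the value of
// old_mask, matching the merge operand in the _mu checks above.
vbool64_t cmp_keep_old(vbool64_t mask, vbool64_t old_mask, vint32mf2_t a,
                       vint32mf2_t b, size_t vl) {
  return vmsne_vv_i32mf2_b64_mu(mask, old_mask, a, b, vl);
}

// Mask-agnostic: result bits at inactive positions are unspecified, so no
// merge operand is needed; suitable when only active bits are read later.
vbool64_t cmp_dont_care(vbool64_t mask, vint32mf2_t a, vint32mf2_t b,
                        size_t vl) {
  return vmsne_vv_i32mf2_b64_ma(mask, a, b, vl);
}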