From d77d56acfd48e8253a35d885db8daac78793313f Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Thu, 22 Apr 2021 10:18:33 -0700
Subject: [PATCH] [RISCV] Add missing tests for vector types for the second
 operand of the vmsgt and vmsgtu IR intrinsics.

Refactor to use a new multiclass instead of individual patterns.

We already supported this because of the SEW=64 handling on RV32, but we
didn't have test cases for all the types we support.

Part of D100925
---
 .../Target/RISCV/RISCVInstrInfoVPseudos.td |  46 +-
 llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll  | 936 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll  | 936 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll | 936 ++++++++++++++++++
 llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll | 936 ++++++++++++++++++
 5 files changed, 3746 insertions(+), 44 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 43c386183ad0..af589d2e2437 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3949,50 +3949,8 @@ defm : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
 defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
 
 // Match vmsgt with 2 vector operands to vmslt with the operands swapped.
-// Occurs when legalizing vmsgt(u).vx intrinsics for i64 on RV32 since we need
-// to use a more complex splat sequence. Add the pattern for all VTs for
-// consistency.
-foreach vti = AllIntegerVectors in {
-  def : Pat<(vti.Mask (int_riscv_vmsgt (vti.Vector vti.RegClass:$rs2),
-                                       (vti.Vector vti.RegClass:$rs1),
-                                       VLOpFrag)),
-            (!cast<Instruction>("PseudoVMSLT_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
-                                                               vti.RegClass:$rs2,
-                                                               GPR:$vl,
-                                                               vti.SEW)>;
-  def : Pat<(vti.Mask (int_riscv_vmsgt_mask (vti.Mask VR:$merge),
-                                            (vti.Vector vti.RegClass:$rs2),
-                                            (vti.Vector vti.RegClass:$rs1),
-                                            (vti.Mask V0),
-                                            VLOpFrag)),
-            (!cast<Instruction>("PseudoVMSLT_VV_"#vti.LMul.MX#"_MASK")
-                                                        VR:$merge,
-                                                        vti.RegClass:$rs1,
-                                                        vti.RegClass:$rs2,
-                                                        (vti.Mask V0),
-                                                        GPR:$vl,
-                                                        vti.SEW)>;
-
-  def : Pat<(vti.Mask (int_riscv_vmsgtu (vti.Vector vti.RegClass:$rs2),
-                                        (vti.Vector vti.RegClass:$rs1),
-                                        VLOpFrag)),
-            (!cast<Instruction>("PseudoVMSLTU_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
-                                                                vti.RegClass:$rs2,
-                                                                GPR:$vl,
-                                                                vti.SEW)>;
-  def : Pat<(vti.Mask (int_riscv_vmsgtu_mask (vti.Mask VR:$merge),
-                                             (vti.Vector vti.RegClass:$rs2),
-                                             (vti.Vector vti.RegClass:$rs1),
-                                             (vti.Mask V0),
-                                             VLOpFrag)),
-            (!cast<Instruction>("PseudoVMSLTU_VV_"#vti.LMul.MX#"_MASK")
-                                                        VR:$merge,
-                                                        vti.RegClass:$rs1,
-                                                        vti.RegClass:$rs2,
-                                                        (vti.Mask V0),
-                                                        GPR:$vl,
-                                                        vti.SEW)>;
-}
+defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
+defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
 
 // Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
 // avoids the user needing to know that there is no vmslt(u).vi instruction.
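Note for reviewers: the VPatBinarySwappedM_VV multiclass itself is added
elsewhere in the D100925 stack, so its body is not visible in this diff.
As a rough sketch of what it expands to, reconstructed from the deleted
patterns above (the exact helper-class factoring and names in D100925 may
differ), it is essentially the old foreach parameterized over the
intrinsic and pseudo-instruction names:

multiclass VPatBinarySwappedM_VV<string intrinsic, string instruction,
                                 list<VTypeInfo> vtilist> {
  foreach vti = vtilist in {
    // Unmasked: vmsgt(u) vs2, vs1 is selected as vmslt(u) vs1, vs2.
    def : Pat<(vti.Mask (!cast<Intrinsic>(intrinsic)
                            (vti.Vector vti.RegClass:$rs2),
                            (vti.Vector vti.RegClass:$rs1),
                            VLOpFrag)),
              (!cast<Instruction>(instruction#"_VV_"#vti.LMul.MX)
                  vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.SEW)>;
    // Masked form: same operand swap, keeping the merge and mask operands.
    def : Pat<(vti.Mask (!cast<Intrinsic>(intrinsic#"_mask")
                            (vti.Mask VR:$merge),
                            (vti.Vector vti.RegClass:$rs2),
                            (vti.Vector vti.RegClass:$rs1),
                            (vti.Mask V0),
                            VLOpFrag)),
              (!cast<Instruction>(instruction#"_VV_"#vti.LMul.MX#"_MASK")
                  VR:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
                  (vti.Mask V0), GPR:$vl, vti.SEW)>;
  }
}

The swap is sound because "a > b" and "b < a" are the same predicate for
both the signed and unsigned comparisons, which is why the tests below
check for vmslt.vv/vmsltu.vv with reversed register operands.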
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll index 3af341064fa6..504b44e0be1c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -1,6 +1,942 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgt.nxv1i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i8( + %1, + %2, + i32 %4) + %a = call 
@llvm.riscv.vmsgt.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv32i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, 
v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i32( + 
%1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i64( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i64( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i64( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + declare @llvm.riscv.vmsgt.nxv1i8.i8( , i8, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll index fcbb58f63d96..e03678a69557 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll @@ -1,6 +1,942 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgt.nxv1i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i8( + , + , + i64); + +define 
@intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i8( + , + , + 
, + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv32i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; 
CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i32( + , + , + i64); + 
+define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i32( + %0, + %1, + i64 %2) + + ret %a 
+} + +declare @llvm.riscv.vmsgt.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i64( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i64( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i64( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, 
v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + declare @llvm.riscv.vmsgt.nxv1i8.i8( , i8, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll index 4404f007db95..839722ce6c24 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll @@ -1,6 +1,942 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgtu.nxv1i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv16i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv32i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %mask = call @llvm.riscv.vmsgtu.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; 
CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv16i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i32( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i32( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
<vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    i32 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i32> %3,
+    <vscale x 2 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    i32 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i32> %3,
+    <vscale x 4 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    i32 %4)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i32> %3,
+    <vscale x 8 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    i32 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i64> %3,
+    <vscale x 1 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    i32 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i64> %3,
+    <vscale x 2 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    i32 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i64> %3,
+    <vscale x 4 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
index ecb29a15241b..fea3c2246bf1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll
@@ -1,6 +1,942 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    i64 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i8> %3,
+    <vscale x 1 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
+    <vscale x 2 x i8> %1,
+    <vscale x 2 x i8> %2,
+    i64 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i8> %3,
+    <vscale x 2 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i8(
+    <vscale x 4 x i8> %1,
+    <vscale x 4 x i8> %2,
+    i64 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i8> %3,
+    <vscale x 4 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i8(
+    <vscale x 8 x i8> %1,
+    <vscale x 8 x i8> %2,
+    i64 %4)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i8> %3,
+    <vscale x 8 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
+    <vscale x 16 x i8> %0,
+    <vscale x 16 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
+    <vscale x 16 x i8> %1,
+    <vscale x 16 x i8> %2,
+    i64 %4)
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i8> %3,
+    <vscale x 16 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
+    <vscale x 32 x i8> %0,
+    <vscale x 32 x i8> %1,
+    i64 %2)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
+  <vscale x 32 x i1>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i8>,
+  <vscale x 32 x i1>,
+  i64);
+
+define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
+    <vscale x 32 x i8> %1,
+    <vscale x 32 x i8> %2,
+    i64 %4)
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %2,
+    <vscale x 32 x i8> %3,
+    <vscale x 32 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 32 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i16(
+    <vscale x 1 x i16> %1,
+    <vscale x 1 x i16> %2,
+    i64 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %2,
+    <vscale x 1 x i16> %3,
+    <vscale x 1 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i16(
+    <vscale x 2 x i16> %1,
+    <vscale x 2 x i16> %2,
+    i64 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %2,
+    <vscale x 2 x i16> %3,
+    <vscale x 2 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i16(
+    <vscale x 4 x i16> %1,
+    <vscale x 4 x i16> %2,
+    i64 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %2,
+    <vscale x 4 x i16> %3,
+    <vscale x 4 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
+    <vscale x 8 x i16> %1,
+    <vscale x 8 x i16> %2,
+    i64 %4)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %2,
+    <vscale x 8 x i16> %3,
+    <vscale x 8 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
+    <vscale x 16 x i16> %0,
+    <vscale x 16 x i16> %1,
+    i64 %2)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
+  <vscale x 16 x i1>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i16>,
+  <vscale x 16 x i1>,
+  i64);
+
+define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
+    <vscale x 16 x i16> %1,
+    <vscale x 16 x i16> %2,
+    i64 %4)
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %2,
+    <vscale x 16 x i16> %3,
+    <vscale x 16 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 16 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i32(
+    <vscale x 1 x i32> %1,
+    <vscale x 1 x i32> %2,
+    i64 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i32> %3,
+    <vscale x 1 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i32(
+    <vscale x 2 x i32> %1,
+    <vscale x 2 x i32> %2,
+    i64 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i32> %3,
+    <vscale x 2 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
+    <vscale x 4 x i32> %1,
+    <vscale x 4 x i32> %2,
+    i64 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i32> %3,
+    <vscale x 4 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32> %1,
+    i64 %2)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
+  <vscale x 8 x i1>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>,
+  <vscale x 8 x i1>,
+  i64);
+
+define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
+    <vscale x 8 x i32> %1,
+    <vscale x 8 x i32> %2,
+    i64 %4)
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %2,
+    <vscale x 8 x i32> %3,
+    <vscale x 8 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 8 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i64(
+    <vscale x 1 x i64> %1,
+    <vscale x 1 x i64> %2,
+    i64 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i64> %3,
+    <vscale x 1 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v10, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v12, v10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
+    <vscale x 2 x i64> %1,
+    <vscale x 2 x i64> %2,
+    i64 %4)
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i64> %3,
+    <vscale x 2 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    i64 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i64> %3,
+    <vscale x 4 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,