diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index e158b632aa73..0ef798937a66 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -35,6 +35,10 @@ def SplatPat : ComplexPattern<vAny, 1, "selectVSplat", [], []>;
 def SplatPat_simm5 : ComplexPattern<vAny, 1, "selectVSplatSimm5", [], []>;
 def SplatPat_uimm5 : ComplexPattern<vAny, 1, "selectVSplatUimm5", [], []>;
 
+class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
+  dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
+}
+
 multiclass VPatUSLoadStoreSDNode<LLVMType type,
                                  LLVMType mask_type,
                                  int sew,
@@ -125,6 +129,66 @@ multiclass VPatBinarySDNode_VV_VX_VI<SDNode vop, string instruction_name,
   }
 }
 
+multiclass VPatIntegerSetCCSDNode_VV<CondCode cc,
+                                     string instruction_name,
+                                     bit swap = 0> {
+  foreach vti = AllIntegerVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#
+                                            vti.LMul.MX);
+    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+                               (vti.Vector vti.RegClass:$rs2), cc)),
+              SwapHelper<(instruction),
+                         (instruction vti.RegClass:$rs1),
+                         (instruction vti.RegClass:$rs2),
+                         (instruction VLMax, vti.SEW),
+                         swap>.Value>;
+  }
+}
+
+multiclass VPatIntegerSetCCSDNode_XI<CondCode cc,
+                                     string instruction_name,
+                                     string kind,
+                                     ComplexPattern SplatPatKind,
+                                     DAGOperand xop_kind,
+                                     bit swap = 0> {
+  foreach vti = AllIntegerVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#"_"#kind#"_"#
+                                            vti.LMul.MX);
+    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+              SwapHelper<(instruction),
+                         (instruction vti.RegClass:$rs1),
+                         (instruction xop_kind:$rs2),
+                         (instruction VLMax, vti.SEW),
+                         swap>.Value>;
+  }
+}
+
+multiclass VPatIntegerSetCCSDNode_VV_VX_VI<CondCode cc,
+                                           string instruction_name,
+                                           bit swap = 0> {
+  defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+                                   SplatPat, GPR, swap>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
+                                   SplatPat_simm5, simm5, swap>;
+}
+
+multiclass VPatIntegerSetCCSDNode_VV_VX<CondCode cc,
+                                        string instruction_name> {
+  defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+                                   SplatPat, GPR>;
+}
+
+multiclass VPatIntegerSetCCSDNode_VX_VI<CondCode cc,
+                                        string instruction_name> {
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+                                   SplatPat, GPR>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
+                                   SplatPat_simm5, simm5>;
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -164,6 +228,28 @@ defm "" : VPatBinarySDNode_VV_VX_VI<shl, "PseudoVSLL", uimm5>;
 defm "" : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
 defm "" : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;
 
+// 12.8. Vector Integer Comparison Instructions
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ, "PseudoVMSEQ">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE, "PseudoVMSNE">;
+
+// FIXME: Support immediate forms of these by choosing SLE and decrementing the
+// immediate
+defm "" : VPatIntegerSetCCSDNode_VV_VX<SETLT, "PseudoVMSLT">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX<SETULT, "PseudoVMSLTU">;
+
+defm "" : VPatIntegerSetCCSDNode_VV<SETGT, "PseudoVMSLT", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VV<SETUGT, "PseudoVMSLTU", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VX_VI<SETGT, "PseudoVMSGT">;
+defm "" : VPatIntegerSetCCSDNode_VX_VI<SETUGT, "PseudoVMSGTU">;
+
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETLE, "PseudoVMSLE">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETULE, "PseudoVMSLEU">;
+
+// FIXME: Support immediate forms of these by choosing SGT and decrementing the
+// immediate
+defm "" : VPatIntegerSetCCSDNode_VV<SETGE, "PseudoVMSLE", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VV<SETUGE, "PseudoVMSLEU", /*swap*/1>;
+
 // 12.9. Vector Integer Min/Max Instructions
 defm "" : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
 defm "" : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
new file mode 100644
index 000000000000..85562131f08b
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
@@ -0,0 +1,3128 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
+; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
+; instruction.
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp eq <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmseq.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp ne <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsne.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v17, v16
+; CHECK-NEXT:    ret
+  %vc = icmp ugt <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsltu.vv v0, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v17, v16
+; CHECK-NEXT:    ret
+  %vc = icmp uge <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsleu.vv v0, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, -16
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 15
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 1
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, -15
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_5(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp ult <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsltu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 1, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp ule <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsleu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v17, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sgt <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmslt.vv v0, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsle.vv v0, v17, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sge <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsle.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsle.vv v0, v16, v25
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, -16
+; CHECK-NEXT:    vmsle.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, -15
+; CHECK-NEXT:    vmsle.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmsle.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsle.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp slt <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmslt.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, zero
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsle.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp sle <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsle.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsle.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmseq.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = icmp eq <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmseq.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmseq.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsne.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = icmp ne <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsne.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsne.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = icmp ugt <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsltu.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = icmp uge <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsleu.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, -16
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 15
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 1
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, -15
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i16_5(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i16_5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = icmp ult <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsltu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 1, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i16_4(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i16_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = icmp ule <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsleu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsleu.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sgt <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmslt.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsle.vv v0, v18, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sge <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsle.vv v0, v16, v26
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, -16
+; CHECK-NEXT:    vmsle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, -15
+; CHECK-NEXT:    vmsle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_iv_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmsle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i16_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = icmp slt <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_slt_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmslt.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_iv_nxv8i16_1(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_iv_nxv8i16_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_2(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, zero
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i16_3(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i16_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 16, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsle.vv v0, v16, v18
+; CHECK-NEXT:    ret
+  %vc = icmp sle <vscale x 8 x i16> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsle.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i16(<vscale x 8 x i16> %va, i16 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.x v26, a0
+; CHECK-NEXT:    vmsle.vv v0, v26, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i16> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i16_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i16> undef, i16 5, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i16> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmseq.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = icmp eq <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmseq.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmseq.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsne.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = icmp ne <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsne.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ne_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsne.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = icmp ugt <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ugt_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsltu.vv v0, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = icmp uge <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_uge_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsleu.vv v0, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, -16
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 15
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_iv_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 1
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, -15
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i32_5(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i32_5:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = icmp ult <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ult_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsltu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_iv_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_iv_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_2(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_3(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 1, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i32_4(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i32_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v16, v20
+; CHECK-NEXT:    ret
+  %vc = icmp ule <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsleu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_ule_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsleu.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sgt <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsgt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sgt_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmslt.vv v0, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 5, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmsle.vv v0, v20, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sge <vscale x 8 x i32> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsle.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_xv_nxv8i32(<vscale x 8 x i32> %va, i32 %b) {
+; CHECK-LABEL: icmp_sge_xv_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.x v28, a0
+; CHECK-NEXT:    vmsle.vv v0, v16, v28
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i32> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, -16
+; CHECK-NEXT:    vmsle.vv v0, v28, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i32> undef, i32 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i32> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i32_1(<vscale x 8 x i32> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i32_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sge_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_slt_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmslt.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_slt_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli 
a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sle_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %splat, %va + ret %vc +} + +define @icmp_sle_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_eq_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmseq.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_eq_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, 
i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_eq_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_ne_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ne_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsne.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %splat, %va + ret %vc +} + +define @icmp_ne_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ugt_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; 
CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %splat, %va + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_uge_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -16 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 15 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_uge_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector 
%head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 1 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -15 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_5( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_5: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ult_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ult_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ule_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %splat, %va + ret %vc +} + +define @icmp_ule_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 
+; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sgt_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %splat, %va + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sge_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -16 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -15 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sge_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_slt_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_slt_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define 
<vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, zero
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmsle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = icmp sle <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    vmsle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_xv_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_xv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a2, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a1
+; CHECK-NEXT:    addi a1, zero, 32
+; CHECK-NEXT:    vsll.vx v8, v8, a1
+; CHECK-NEXT:    vmv.v.x v24, a0
+; CHECK-NEXT:    vsll.vx v24, v24, a1
+; CHECK-NEXT:    vsrl.vx v24, v24, a1
+; CHECK-NEXT:    vor.vv v8, v24, v8
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 5, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+; Check a setcc with two constant splats, which would previously get stuck in
+; an infinite loop. DAGCombine isn't clever enough to constant-fold
+; splat_vectors but could continuously swap the operands, trying to put the
+; splat on the RHS.
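+;
+; As a rough illustration (not code from this patch), the loop is avoided by
+; swapping in one direction only, so two splat operands reach a fixed point.
+; Assuming a hypothetical isSplat() predicate, a DAGCombine-style
+; canonicalization guard might look like:
+;
+;   if (isSplat(LHS) && !isSplat(RHS)) {
+;     std::swap(LHS, RHS);
+;     CC = ISD::getSetCCSwappedOperands(CC); // e.g. SETLT <-> SETGT
+;   }
+;
+; The all-splat test below pins down the previously-looping case.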
+define <vscale x 8 x i1> @icmp_eq_ii_nxv8i8() {
+; CHECK-LABEL: icmp_eq_ii_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 5
+; CHECK-NEXT:    vmseq.vi v0, v25, 2
+; CHECK-NEXT:    ret
+  %heada = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splata = shufflevector <vscale x 8 x i8> %heada, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %headb = insertelement <vscale x 8 x i8> undef, i8 2, i32 0
+  %splatb = shufflevector <vscale x 8 x i8> %headb, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %splata, %splatb
+  ret <vscale x 8 x i1> %vc
+}
+
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
new file mode 100644
index 000000000000..180b9044a3f6
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll
@@ -0,0 +1,2981 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+; FIXME: The scalar/vector operations ('xv' and 'iv' tests) should swap
+; operands and condition codes accordingly in order to generate a 'vx' or 'vi'
+; instruction.
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp eq <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_xv_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_xv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmseq.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_iv_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_iv_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 5
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 5, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %splat, %va
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp ne <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp
ne %va, %splat + ret %vc +} + +define @icmp_ne_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ne_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsne.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %splat, %va + ret %vc +} + +define @icmp_ne_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i8 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ugt_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsltu.vv v0, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %splat, %va + ret %vc +} + +define @icmp_ugt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i8 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_uge_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -16 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 
15 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_iv_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_uge_iv_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 1 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -15 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_5( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_5: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ult_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsltu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 
+; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_iv_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ult_iv_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ule_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %splat, %va + ret %vc +} + +define @icmp_ule_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i8 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sgt_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu 
+; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmslt.vv v0, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %splat, %va + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i8 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sge_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v16, v25 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -16 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -15 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_iv_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sge_iv_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; 
CHECK-NEXT: vmslt.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_slt_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmslt.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_iv_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_slt_iv_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_xv_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sle_xv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %splat, %va + ret %vc +} 
+ +define @icmp_sle_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i8 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_eq_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmseq.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_eq_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_iv_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_eq_iv_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_ne_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ne_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsne.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %splat, %va + ret %vc +} + +define @icmp_ne_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer 
+ %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ugt_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsltu.vv v0, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %splat, %va + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_uge_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -16 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 15 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_iv_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_uge_iv_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_2: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 1 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -15 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_5( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_5: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ult_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsltu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_iv_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ult_iv_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_2: +; CHECK: # %bb.0: 
+; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ule_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %splat, %va + ret %vc +} + +define @icmp_ule_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sgt_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmslt.vv v0, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %splat, %va + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret 
%vc +} + +define @icmp_sge_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sge_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v16, v26 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -16 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -15 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_iv_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sge_iv_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_slt_xv_nxv8i16: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmslt.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_iv_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_slt_iv_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_xv_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sle_xv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %splat, %va + ret %vc +} + +define @icmp_sle_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i16 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, 
e32,m4,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_eq_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmseq.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_eq_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_eq_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_ne_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ne_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsne.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %splat, %va + ret %vc +} + +define @icmp_ne_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, 
a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ugt_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsltu.vv v0, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %splat, %va + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_uge_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -16 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 15 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_uge_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 1 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = 
insertelement undef, i32 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_5( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_5: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ult_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsltu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ult_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 1, 
i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ule_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %splat, %va + ret %vc +} + +define @icmp_ule_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sgt_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmslt.vv v0, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %splat, %va + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; 
CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sge_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v16, v28 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -16 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sge_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_slt_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmslt.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu 
+; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_iv_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_slt_iv_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_xv_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sle_xv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %splat, %va + ret %vc +} + +define @icmp_sle_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i32 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} 
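+
+; NOTE: In the nxv8i64 'vv' tests in this block, %va occupies the v16m8
+; argument register group, so the second LMUL=8 vector operand does not fit
+; in the vector argument registers; it is presumably passed indirectly and
+; reloaded with 'vle64.v v8, (a0)' before the compare. For the same reason
+; the splat temporaries in the i64 'xv'/'vx'/'vi' tests use the full v8m8
+; register group rather than the v25/v26/v28 temporaries seen at lower LMUL.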
+ +define @icmp_eq_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_eq_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmseq.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_eq_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_eq_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %splat, %va + ret %vc +} + +define @icmp_ne_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ne_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsne.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %splat, %va + ret %vc +} + +define @icmp_ne_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ugt_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; 
CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %splat, %va + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_uge_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -16 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 15 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_uge_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %splat, %va + ret %vc +} + +define @icmp_uge_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 1 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -15 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + 
%head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_5( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_5: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ult_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ult_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %splat, %va + ret %vc +} + +define @icmp_ult_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 1, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + 
%head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ule_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %splat, %va + ret %vc +} + +define @icmp_ule_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sgt_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %splat, %va + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sge_xv_nxv8i64: +; CHECK: 
# %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -16 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -15 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sge_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %splat, %va + ret %vc +} + +define @icmp_sge_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_slt_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_1( 
%va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_iv_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_slt_iv_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %splat, %va + ret %vc +} + +define @icmp_slt_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_xv_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sle_xv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %splat, %va + ret %vc +} + +define @icmp_sle_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 5 +; CHECK-NEXT: ret + %head = insertelement undef, i64 5, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} +
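+
+; NOTE: Summary of the immediate handling exercised above: eq/ne/ugt/sgt/
+; ule/sle compares of a vector against a simm5 splat select the '.vi' forms;
+; ult/slt always use '.vx', materializing out-of-range constants with 'addi'
+; (or using the zero register for 'slt 0'); uge/sge have no register or
+; immediate compare instruction, so the splat is materialized with
+; 'vmv.v.i'/'vmv.v.x' and a '.vv' compare is used. The 'iv' variants with the
+; splat on the left swap to the commuted predicate where a '.vi' form exists
+; (e.g. 'uge' becomes 'vmsleu.vi', 'slt' becomes 'vmsgt.vi'). Degenerate
+; unsigned cases fold away entirely: 'uge 0' becomes 'vmset.m', 'ult 0'
+; becomes 'vmclr.m', and 'ult 1' becomes 'vmseq.vi ..., 0'.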