From e6b1ac85461ac37526fcc3157a15880c4b289b97 Mon Sep 17 00:00:00 2001 From: Fraser Cormack Date: Wed, 11 Aug 2021 13:09:14 +0100 Subject: [PATCH] [LegalizeTypes][VP] Add widening support for binary VP ops This patch adds the beginnings of more thorough support in the legalizers for vector-predicated (VP) operations. The first step is the ability to widen illegal vectors. The more complicated scenario in which the result/operands need widening but the mask doesn't has not been handled here. That would require a lot of code without an in-tree target on which to test it. Reviewed By: RKSimon Differential Revision: https://reviews.llvm.org/D107904 --- llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h | 2 +- .../SelectionDAG/LegalizeVectorTypes.cpp | 49 +++++++- .../RISCV/rvv/fixed-vectors-vadd-vp.ll | 76 ++++++++++++ .../RISCV/rvv/fixed-vectors-vand-vp.ll | 108 ++++++++++++++++++ .../RISCV/rvv/fixed-vectors-vdiv-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vdivu-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vfadd-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vfdiv-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vfmul-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vfsub-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vmul-vp.ll | 50 ++++++++ .../CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll | 76 ++++++++++++ .../RISCV/rvv/fixed-vectors-vrem-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vremu-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vshl-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vsra-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vsrl-vp.ll | 12 ++ .../RISCV/rvv/fixed-vectors-vsub-vp.ll | 50 ++++++++ .../RISCV/rvv/fixed-vectors-vxor-vp.ll | 102 +++++++++++++++++ llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll | 76 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vand-vp.ll | 76 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll | 50 ++++++++ llvm/test/CodeGen/RISCV/rvv/vor-vp.ll | 76 ++++++++++++ llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll | 12 ++ llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll | 50 ++++++++ llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll | 102 +++++++++++++++++ 36 files changed, 1203 insertions(+), 4 deletions(-) diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h index bfd3927f8a16..508823441e9f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeTypes.h @@ -915,7 +915,7 @@ private: SDValue WidenVecRes_VECTOR_SHUFFLE(ShuffleVectorSDNode *N); SDValue WidenVecRes_Ternary(SDNode *N); - SDValue WidenVecRes_Binary(SDNode *N); + SDValue WidenVecRes_Binary(SDNode *N, bool IsVP); SDValue WidenVecRes_BinaryCanTrap(SDNode *N); SDValue WidenVecRes_BinaryWithExtraScalarOp(SDNode *N); SDValue WidenVecRes_StrictFP(SDNode *N); diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp index d3ebf6ca89ce..2c6cef8ebf16 100644 --- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp @@ -3070,7 +3070,7 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) { case 
ISD::USHLSAT:
 case ISD::ROTL:
 case ISD::ROTR:
- Res = WidenVecRes_Binary(N);
+ Res = WidenVecRes_Binary(N, /*IsVP*/ false);
 break;
 
 case ISD::FADD:
@@ -3194,6 +3194,31 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
 case ISD::FSHR:
 Res = WidenVecRes_Ternary(N);
 break;
+ case ISD::VP_ADD:
+ case ISD::VP_AND:
+ case ISD::VP_MUL:
+ case ISD::VP_OR:
+ case ISD::VP_SUB:
+ case ISD::VP_XOR:
+ case ISD::VP_SHL:
+ case ISD::VP_LSHR:
+ case ISD::VP_ASHR:
+ case ISD::VP_SDIV:
+ case ISD::VP_UDIV:
+ case ISD::VP_SREM:
+ case ISD::VP_UREM:
+ case ISD::VP_FADD:
+ case ISD::VP_FSUB:
+ case ISD::VP_FMUL:
+ case ISD::VP_FDIV:
+ case ISD::VP_FREM:
+ // Vector-predicated binary op widening. Note that -- unlike the
+ // unpredicated versions -- we don't have to worry about trapping on
+ // operations like UDIV, FADD, etc., as we pass on the original vector
+ // length parameter. This means the widened elements containing garbage
+ // aren't active.
+ Res = WidenVecRes_Binary(N, /*IsVP*/ true);
+ break;
 }
 
 // If Res is null, the sub-method took care of registering the result.
@@ -3211,13 +3236,31 @@ SDValue DAGTypeLegalizer::WidenVecRes_Ternary(SDNode *N) {
 return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, InOp3);
 }
 
-SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N) {
+SDValue DAGTypeLegalizer::WidenVecRes_Binary(SDNode *N, bool IsVP) {
 // Binary op widening.
 SDLoc dl(N);
 EVT WidenVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(0));
 SDValue InOp1 = GetWidenedVector(N->getOperand(0));
 SDValue InOp2 = GetWidenedVector(N->getOperand(1));
- return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2, N->getFlags());
+ if (!IsVP)
+ return DAG.getNode(N->getOpcode(), dl, WidenVT, InOp1, InOp2,
+ N->getFlags());
+ // For VP operations, we must also widen the mask. Note that the mask type
+ // may not actually need widening, leading it to be split along with the VP
+ // operation.
+ // FIXME: This could lead to an infinite split/widen loop. We only handle the
+ // case where the mask needs widening to an identically-sized type as the
+ // vector inputs.
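+ // For example, a VP_ADD of <3 x i8> operands with a <3 x i1> mask is
+ // widened here to a <4 x i8> VP_ADD with a <4 x i1> mask; the vector
+ // length operand is passed through unchanged, so the extra lane is never
+ // active.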
+ SDValue Mask = N->getOperand(2); + assert(getTypeAction(Mask.getValueType()) == + TargetLowering::TypeWidenVector && + "Unable to widen binary VP op"); + Mask = GetWidenedVector(Mask); + assert(Mask.getValueType().getVectorElementCount() == + WidenVT.getVectorElementCount() && + "Unable to widen binary VP op"); + return DAG.getNode(N->getOpcode(), dl, WidenVT, + {InOp1, InOp2, Mask, N->getOperand(3)}, N->getFlags()); } SDValue DAGTypeLegalizer::WidenVecRes_BinaryWithExtraScalarOp(SDNode *N) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll index ef8c3613e192..696b38e7879c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll @@ -156,6 +156,82 @@ define <4 x i8> @vadd_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } +declare <5 x i8> @llvm.vp.add.v5i8(<5 x i8>, <5 x i8>, <5 x i1>, i32) + +define <5 x i8> @vadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vadd_vv_v5i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl) + ret <5 x i8> %v +} + +define <5 x i8> @vadd_vv_v5i8_unmasked(<5 x i8> %va, <5 x i8> %b, i32 zeroext %evl) { +; CHECK-LABEL: vadd_vv_v5i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <5 x i1> undef, i1 true, i32 0 + %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer + %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl) + ret <5 x i8> %v +} + +define <5 x i8> @vadd_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vadd_vx_v5i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <5 x i8> undef, i8 %b, i32 0 + %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer + %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl) + ret <5 x i8> %v +} + +define <5 x i8> @vadd_vx_v5i8_unmasked(<5 x i8> %va, i8 %b, i32 zeroext %evl) { +; CHECK-LABEL: vadd_vx_v5i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vx v8, v8, a0 +; CHECK-NEXT: ret + %elt.head = insertelement <5 x i8> undef, i8 %b, i32 0 + %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer + %head = insertelement <5 x i1> undef, i1 true, i32 0 + %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer + %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl) + ret <5 x i8> %v +} + +define <5 x i8> @vadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vadd_vi_v5i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <5 x i8> undef, i8 -1, i32 0 + %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer + %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl) + ret <5 x i8> %v +} + +define <5 x i8> @vadd_vi_v5i8_unmasked(<5 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vadd_vi_v5i8_unmasked: +; CHECK: 
# %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vadd.vi v8, v8, -1 +; CHECK-NEXT: ret + %elt.head = insertelement <5 x i8> undef, i8 -1, i32 0 + %vb = shufflevector <5 x i8> %elt.head, <5 x i8> undef, <5 x i32> zeroinitializer + %head = insertelement <5 x i1> undef, i1 true, i32 0 + %m = shufflevector <5 x i1> %head, <5 x i1> undef, <5 x i32> zeroinitializer + %v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %vb, <5 x i1> %m, i32 %evl) + ret <5 x i8> %v +} + declare <8 x i8> @llvm.vp.add.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @vadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll index 405089254961..6d6ff37df791 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll @@ -1228,6 +1228,114 @@ define <8 x i64> @vand_vi_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { ret <8 x i64> %v } +declare <11 x i64> @llvm.vp.and.v11i64(<11 x i64>, <11 x i64>, <11 x i1>, i32) + +define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vand_vv_v11i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vand.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl) + ret <11 x i64> %v +} + +define <11 x i64> @vand_vv_v11i64_unmasked(<11 x i64> %va, <11 x i64> %b, i32 zeroext %evl) { +; CHECK-LABEL: vand_vv_v11i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vand.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement <11 x i1> undef, i1 true, i32 0 + %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer + %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl) + ret <11 x i64> %v +} + +define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: vand_vx_v11i64: +; RV32: # %bb.0: +; RV32-NEXT: vmv1r.v v25, v0 +; RV32-NEXT: addi a3, zero, 32 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: lui a1, 341 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vmv.s.x v0, a1 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vmerge.vxm v16, v16, a0, v0 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vmv1r.v v0, v25 +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vand_vx_v11i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement <11 x i64> undef, i64 %b, i32 0 + %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer + %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl) + ret <11 x i64> %v +} + +define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %evl) { +; RV32-LABEL: vand_vx_v11i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi a3, zero, 32 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu +; RV32-NEXT: vmv.v.x v16, a1 +; RV32-NEXT: lui a1, 341 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, mu +; RV32-NEXT: vmv.s.x v0, a1 +; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, mu 
+; RV32-NEXT: vmerge.vxm v16, v16, a0, v0 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: ret +; +; RV64-LABEL: vand_vx_v11i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: ret + %elt.head = insertelement <11 x i64> undef, i64 %b, i32 0 + %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer + %head = insertelement <11 x i1> undef, i1 true, i32 0 + %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer + %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl) + ret <11 x i64> %v +} + +define <11 x i64> @vand_vi_v11i64(<11 x i64> %va, <11 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vand_vi_v11i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vand.vi v8, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <11 x i64> undef, i64 4, i32 0 + %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer + %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl) + ret <11 x i64> %v +} + +define <11 x i64> @vand_vi_v11i64_unmasked(<11 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vand_vi_v11i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu +; CHECK-NEXT: vand.vi v8, v8, 4 +; CHECK-NEXT: ret + %elt.head = insertelement <11 x i64> undef, i64 4, i32 0 + %vb = shufflevector <11 x i64> %elt.head, <11 x i64> undef, <11 x i32> zeroinitializer + %head = insertelement <11 x i1> undef, i1 true, i32 0 + %m = shufflevector <11 x i1> %head, <11 x i1> undef, <11 x i32> zeroinitializer + %v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %vb, <11 x i1> %m, i32 %evl) + ret <11 x i64> %v +} + declare <16 x i64> @llvm.vp.and.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32) define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll index 1a7e67abbc34..ae2f58c7df4d 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdiv-vp.ll @@ -104,6 +104,18 @@ define <4 x i8> @vdiv_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } +declare <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) + +define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vdiv_vv_v6i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) + ret <6 x i8> %v +} + declare <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @vdiv_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll index 559d4c1e1dc1..da9b5ce09dcf 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vdivu-vp.ll @@ -104,6 +104,18 @@ define <4 x i8> @vdivu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) { ret <4 x i8> %v } +declare <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32) + +define <6 x i8> 
@vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vdivu_vv_v6i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl) + ret <6 x i8> %v +} + declare <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @vdivu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll index e6ed948c0665..9302940d29ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfadd-vp.ll @@ -56,6 +56,18 @@ define <2 x half> @vfadd_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } +declare <3 x half> @llvm.vp.fadd.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) + +define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfadd_vv_v3f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <3 x half> @llvm.vp.fadd.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) + ret <3 x half> %v +} + declare <4 x half> @llvm.vp.fadd.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll index 87aaff021451..aa4ab8dba3a2 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfdiv-vp.ll @@ -56,6 +56,18 @@ define <2 x half> @vfdiv_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } +declare <3 x half> @llvm.vp.fdiv.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) + +define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfdiv_vv_v3f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <3 x half> @llvm.vp.fdiv.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) + ret <3 x half> %v +} + declare <4 x half> @llvm.vp.fdiv.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll index 259fbfb24728..885a16510dbb 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmul-vp.ll @@ -56,6 +56,18 @@ define <2 x half> @vfmul_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } +declare <3 x half> @llvm.vp.fmul.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) + +define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfmul_vv_v3f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <3 x half> @llvm.vp.fmul.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) + ret <3 x half> %v +} + declare <4 x half> @llvm.vp.fmul.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) define <4 x half> 
@vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll index ee04b73af882..e4158640a459 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsub-vp.ll @@ -56,6 +56,18 @@ define <2 x half> @vfsub_vf_v2f16_unmasked(<2 x half> %va, half %b, i32 zeroext ret <2 x half> %v } +declare <3 x half> @llvm.vp.fsub.v3f16(<3 x half>, <3 x half>, <3 x i1>, i32) + +define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vfsub_vv_v3f16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu +; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <3 x half> @llvm.vp.fsub.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl) + ret <3 x half> %v +} + declare <4 x half> @llvm.vp.fsub.v4f16(<4 x half>, <4 x half>, <4 x i1>, i32) define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll index 26834149778d..a1a1005741d7 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmul-vp.ll @@ -354,6 +354,56 @@ define <8 x i16> @vmul_vx_v8i16_unmasked(<8 x i16> %va, i16 %b, i32 zeroext %evl ret <8 x i16> %v } +declare <12 x i16> @llvm.vp.mul.v12i16(<12 x i16>, <12 x i16>, <12 x i1>, i32) + +define <12 x i16> @vmul_vv_v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vmul_vv_v12i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl) + ret <12 x i16> %v +} + +define <12 x i16> @vmul_vv_v12i16_unmasked(<12 x i16> %va, <12 x i16> %b, i32 zeroext %evl) { +; CHECK-LABEL: vmul_vv_v12i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu +; CHECK-NEXT: vmul.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement <12 x i1> undef, i1 true, i32 0 + %m = shufflevector <12 x i1> %head, <12 x i1> undef, <12 x i32> zeroinitializer + %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl) + ret <12 x i16> %v +} + +define <12 x i16> @vmul_vx_v12i16(<12 x i16> %va, i16 %b, <12 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vmul_vx_v12i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <12 x i16> undef, i16 %b, i32 0 + %vb = shufflevector <12 x i16> %elt.head, <12 x i16> undef, <12 x i32> zeroinitializer + %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %vb, <12 x i1> %m, i32 %evl) + ret <12 x i16> %v +} + +define <12 x i16> @vmul_vx_v12i16_unmasked(<12 x i16> %va, i16 %b, i32 zeroext %evl) { +; CHECK-LABEL: vmul_vx_v12i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu +; CHECK-NEXT: vmul.vx v8, v8, a0 +; CHECK-NEXT: ret + %elt.head = insertelement <12 x i16> undef, i16 %b, i32 0 + %vb = shufflevector <12 x i16> %elt.head, <12 x i16> undef, <12 x i32> zeroinitializer + %head = insertelement <12 x i1> undef, i1 true, i32 0 + %m = shufflevector <12 x i1> %head, <12 x i1> undef, <12 x i32> 
zeroinitializer
+ %v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %vb, <12 x i1> %m, i32 %evl)
+ ret <12 x i16> %v
+}
+
 declare <16 x i16> @llvm.vp.mul.v16i16(<16 x i16>, <16 x i16>, <16 x i1>, i32)
 
 define <16 x i16> @vmul_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
index 128dcf4e8546..eeb2b286fb24 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vor-vp.ll
@@ -156,6 +156,82 @@ define <4 x i8> @vor_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
 ret <4 x i8> %v
 }
 
+declare <7 x i8> @llvm.vp.or.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32)
+
+define <7 x i8> @vor_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_v7i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+ ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vv_v7i8_unmasked(<7 x i8> %va, <7 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_v7i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vor.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <7 x i1> undef, i1 true, i32 0
+ %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer
+ %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
+ ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vx_v7i8(<7 x i8> %va, i8 %b, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_v7i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <7 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+ %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+ ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vx_v7i8_unmasked(<7 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_v7i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vor.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <7 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+ %head = insertelement <7 x i1> undef, i1 true, i32 0
+ %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer
+ %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+ ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vi_v7i8(<7 x i8> %va, <7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_v7i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <7 x i8> undef, i8 5, i32 0
+ %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+ %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+ ret <7 x i8> %v
+}
+
+define <7 x i8> @vor_vi_v7i8_unmasked(<7 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_v7i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vor.vi v8, v8, 5
+; CHECK-NEXT: ret
+ %elt.head = insertelement <7 x i8> undef, i8 5, i32 0
+ %vb = shufflevector <7 x i8> %elt.head, <7 x i8> undef, <7 x i32> zeroinitializer
+ %head = insertelement <7 x i1> undef, i1 true, i32 0
+ %m = shufflevector <7 x i1> %head, <7 x i1> undef, <7 x i32> zeroinitializer
+ %v = call <7 x i8> @llvm.vp.or.v7i8(<7 x i8> %va, <7 x i8> %vb, <7 x i1> %m, i32 %evl)
+ ret <7 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.or.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
index fe6cd06fded3..3a637eb80eb6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vrem-vp.ll
@@ -104,6 +104,18 @@ define <4 x i8> @vrem_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ret <4 x i8> %v
 }
 
+declare <6 x i8> @llvm.vp.srem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32)
+
+define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_v6i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <6 x i8> @llvm.vp.srem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
+ ret <6 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.srem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vrem_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
index 3d0796011433..ead9220a7650 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vremu-vp.ll
@@ -104,6 +104,18 @@ define <4 x i8> @vremu_vx_v4i8_unmasked(<4 x i8> %va, i8 %b, i32 zeroext %evl) {
 ret <4 x i8> %v
 }
 
+declare <6 x i8> @llvm.vp.urem.v6i8(<6 x i8>, <6 x i8>, <6 x i1>, i32)
+
+define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_v6i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <6 x i8> @llvm.vp.urem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
+ ret <6 x i8> %v
+}
+
 declare <8 x i8> @llvm.vp.urem.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
 
 define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
index 8cfdb57b8cab..98a9c9938e98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vshl-vp.ll
@@ -80,6 +80,18 @@ define <2 x i8> @vsll_vi_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
 ret <2 x i8> %v
 }
 
+declare <3 x i8> @llvm.vp.shl.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32)
+
+define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsll_vv_v3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <3 x i8> @llvm.vp.shl.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
+ ret <3 x i8> %v
+}
+
 declare <4 x i8> @llvm.vp.shl.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
 
 define <4 x i8> @vsll_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll
index 
370edf420f57..47f7ab7df398 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsra-vp.ll @@ -156,6 +156,18 @@ define <4 x i8> @vsra_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } +declare <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) + +define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsra_vv_v7i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl) + ret <7 x i8> %v +} + declare <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll index 1c45115110fe..0638889c5a17 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsrl-vp.ll @@ -156,6 +156,18 @@ define <4 x i8> @vsrl_vi_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { ret <4 x i8> %v } +declare <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8>, <7 x i8>, <7 x i1>, i32) + +define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsrl_vv_v7i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl) + ret <7 x i8> %v +} + declare <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32) define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll index 077b1f03290c..88955eb4540e 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsub-vp.ll @@ -54,6 +54,56 @@ define <2 x i8> @vsub_vx_v2i8_unmasked(<2 x i8> %va, i8 %b, i32 zeroext %evl) { ret <2 x i8> %v } +declare <3 x i8> @llvm.vp.sub.v3i8(<3 x i8>, <3 x i8>, <3 x i1>, i32) + +define <3 x i8> @vsub_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vv_v3i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl) + ret <3 x i8> %v +} + +define <3 x i8> @vsub_vv_v3i8_unmasked(<3 x i8> %va, <3 x i8> %b, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vv_v3i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <3 x i1> undef, i1 true, i32 0 + %m = shufflevector <3 x i1> %head, <3 x i1> undef, <3 x i32> zeroinitializer + %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl) + ret <3 x i8> %v +} + +define <3 x i8> @vsub_vx_v3i8(<3 x i8> %va, i8 %b, <3 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vx_v3i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <3 x i8> undef, i8 %b, i32 0 + %vb = shufflevector <3 x i8> %elt.head, <3 x i8> undef, <3 x i32> zeroinitializer + 
%v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %vb, <3 x i1> %m, i32 %evl) + ret <3 x i8> %v +} + +define <3 x i8> @vsub_vx_v3i8_unmasked(<3 x i8> %va, i8 %b, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vx_v3i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu +; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: ret + %elt.head = insertelement <3 x i8> undef, i8 %b, i32 0 + %vb = shufflevector <3 x i8> %elt.head, <3 x i8> undef, <3 x i32> zeroinitializer + %head = insertelement <3 x i1> undef, i1 true, i32 0 + %m = shufflevector <3 x i1> %head, <3 x i1> undef, <3 x i32> zeroinitializer + %v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %vb, <3 x i1> %m, i32 %evl) + ret <3 x i8> %v +} + declare <4 x i8> @llvm.vp.sub.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32) define <4 x i8> @vsub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll index c9b4f966fba9..b5b016364ec1 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vxor-vp.ll @@ -310,6 +310,108 @@ define <8 x i8> @vxor_vi_v8i8_unmasked_1(<8 x i8> %va, i32 zeroext %evl) { ret <8 x i8> %v } +declare <9 x i8> @llvm.vp.xor.v9i8(<9 x i8>, <9 x i8>, <9 x i1>, i32) + +define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vv_v9i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl) + ret <9 x i8> %v +} + +define <9 x i8> @vxor_vv_v9i8_unmasked(<9 x i8> %va, <9 x i8> %b, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vv_v9i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vxor.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement <9 x i1> undef, i1 true, i32 0 + %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer + %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl) + ret <9 x i8> %v +} + +define <9 x i8> @vxor_vx_v9i8(<9 x i8> %va, i8 %b, <9 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vx_v9i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <9 x i8> undef, i8 %b, i32 0 + %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer + %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl) + ret <9 x i8> %v +} + +define <9 x i8> @vxor_vx_v9i8_unmasked(<9 x i8> %va, i8 %b, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vx_v9i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %elt.head = insertelement <9 x i8> undef, i8 %b, i32 0 + %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer + %head = insertelement <9 x i1> undef, i1 true, i32 0 + %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer + %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl) + ret <9 x i8> %v +} + +define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vi_v9i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t 
+; CHECK-NEXT: ret
+ %elt.head = insertelement <9 x i8> undef, i8 7, i32 0
+ %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+ %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+ ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8_unmasked(<9 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vxor.vi v8, v8, 7
+; CHECK-NEXT: ret
+ %elt.head = insertelement <9 x i8> undef, i8 7, i32 0
+ %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+ %head = insertelement <9 x i1> undef, i1 true, i32 0
+ %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer
+ %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+ ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vnot.v v8, v8, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <9 x i8> undef, i8 -1, i32 0
+ %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+ %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+ ret <9 x i8> %v
+}
+
+define <9 x i8> @vxor_vi_v9i8_unmasked_1(<9 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vxor_vi_v9i8_unmasked_1:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vxor.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <9 x i8> undef, i8 -1, i32 0
+ %vb = shufflevector <9 x i8> %elt.head, <9 x i8> undef, <9 x i32> zeroinitializer
+ %head = insertelement <9 x i1> undef, i1 true, i32 0
+ %m = shufflevector <9 x i1> %head, <9 x i1> undef, <9 x i32> zeroinitializer
+ %v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %vb, <9 x i1> %m, i32 %evl)
+ ret <9 x i8> %v
+}
+
 declare <16 x i8> @llvm.vp.xor.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
 
 define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 10df4fc4884a..57e049e072d9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -156,6 +156,82 @@ define <vscale x 2 x i8> @vadd_vi_nxv2i8_unmasked(<vscale x 2 x i8> %va, i32 zer
 ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vadd_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vv_nxv3i8_unmasked(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vv_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 3 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> undef, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vx_nxv3i8_unmasked(<vscale x 3 x i8> %va, i8 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vx_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> undef, i8 %b, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> undef, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vi_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> undef, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
+define <vscale x 3 x i8> @vadd_vi_nxv3i8_unmasked(<vscale x 3 x i8> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vadd_vi_nxv3i8_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vadd.vi v8, v8, -1
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 3 x i8> undef, i8 -1, i32 0
+ %vb = shufflevector <vscale x 3 x i8> %elt.head, <vscale x 3 x i8> undef, <vscale x 3 x i32> zeroinitializer
+ %head = insertelement <vscale x 3 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 3 x i1> %head, <vscale x 3 x i1> undef, <vscale x 3 x i32> zeroinitializer
+ %v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %vb, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vadd_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
index c37e1e9ceb93..b62ccc8f410a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vand-vp.ll
@@ -840,6 +840,82 @@ define <vscale x 8 x i16> @vand_vi_nxv8i16_unmasked(<vscale x 8 x i16> %va, i32
 ret <vscale x 8 x i16> %v
 }
 
+declare <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16>, <vscale x 14 x i16>, <vscale x 14 x i1>, i32)
+
+define <vscale x 14 x i16> @vand_vv_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_nxv14i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
+ ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vv_nxv14i16_unmasked(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vv_nxv14i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vand.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 14 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> undef, <vscale x 14 x i32> zeroinitializer
+ %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
+ ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vx_nxv14i16(<vscale x 14 x i16> %va, i16 %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vx_nxv14i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 14 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+ %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+ ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vx_nxv14i16_unmasked(<vscale x 14 x i16> %va, i16 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vx_nxv14i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vand.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 14 x i16> undef, i16 %b, i32 0
+ %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+ %head = insertelement <vscale x 14 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> undef, <vscale x 14 x i32> zeroinitializer
+ %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+ ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vi_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vi_nxv14i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 14 x i16> undef, i16 4, i32 0
+ %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+ %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+ ret <vscale x 14 x i16> %v
+}
+
+define <vscale x 14 x i16> @vand_vi_nxv14i16_unmasked(<vscale x 14 x i16> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vand_vi_nxv14i16_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vand.vi v8, v8, 4
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 14 x i16> undef, i16 4, i32 0
+ %vb = shufflevector <vscale x 14 x i16> %elt.head, <vscale x 14 x i16> undef, <vscale x 14 x i32> zeroinitializer
+ %head = insertelement <vscale x 14 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 14 x i1> %head, <vscale x 14 x i1> undef, <vscale x 14 x i32> zeroinitializer
+ %v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %vb, <vscale x 14 x i1> %m, i32 %evl)
+ ret <vscale x 14 x i16> %v
+}
+
 declare <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>, <vscale x 16 x i1>, i32)
 
 define <vscale x 16 x i16> @vand_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index a9b0e66a7d96..c797bde9ee59 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vdiv_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.sdiv.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vdiv_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdiv_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.sdiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vdiv_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
index eedf9fa447e4..0b2e4b67753e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdivu-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vdivu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vdivu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vdivu_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vdivu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index 2f33166cb392..c3c40aed3670 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfadd_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fadd.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfadd_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfadd_vv_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x double> @llvm.vp.fadd.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index c5f219c5e206..4146f215730c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfdiv_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fdiv.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfdiv_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfdiv_vv_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x double> @llvm.vp.fdiv.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index f33fd4a604fe..a474d5907d95 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfmul_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fmul.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfmul_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfmul_vv_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x double> @llvm.vp.fmul.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index cfcfdf8af768..131ae96ad708 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -760,6 +760,18 @@ define <vscale x 4 x double> @vfsub_vf_nxv4f64_unmasked(<vscale x 4 x double> %v
 ret <vscale x 4 x double> %v
 }
 
+declare <vscale x 7 x double> @llvm.vp.fsub.nxv7f64(<vscale x 7 x double>, <vscale x 7 x double>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x double> @vfsub_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vfsub_vv_nxv7f64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x double> @llvm.vp.fsub.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x double> %v
+}
+
 declare <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double>, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
index 8d42c4c8f93b..d222e4c30ad7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -804,6 +804,56 @@ define <vscale x 4 x i32> @vmul_vx_nxv4i32_unmasked(<vscale x 4 x i32> %va, i32
 ret <vscale x 4 x i32> %v
 }
 
+declare <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32>, <vscale x 7 x i32>, <vscale x 7 x i1>, i32)
+
+define <vscale x 7 x i32> @vmul_vv_nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv7i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x i32> %v
+}
+
+define <vscale x 7 x i32> @vmul_vv_nxv7i32_unmasked(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vv_nxv7i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vmul.vv v8, v8, v12
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 7 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> undef, <vscale x 7 x i32> zeroinitializer
+ %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x i32> %v
+}
+
+define <vscale x 7 x i32> @vmul_vx_nxv7i32(<vscale x 7 x i32> %va, i32 %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_nxv7i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 7 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> undef, <vscale x 7 x i32> zeroinitializer
+ %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x i32> %v
+}
+
+define <vscale x 7 x i32> @vmul_vx_nxv7i32_unmasked(<vscale x 7 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vmul_vx_nxv7i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vmul.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 7 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 7 x i32> %elt.head, <vscale x 7 x i32> undef, <vscale x 7 x i32> zeroinitializer
+ %head = insertelement <vscale x 7 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 7 x i1> %head, <vscale x 7 x i1> undef, <vscale x 7 x i32> zeroinitializer
+ %v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %vb, <vscale x 7 x i1> %m, i32 %evl)
+ ret <vscale x 7 x i32> %v
+}
+
 declare <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
index ecccda50c5d1..586ee00afcaf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vor-vp.ll
@@ -1296,6 +1296,82 @@ define <vscale x 8 x i32> @vor_vi_nxv8i32_unmasked(<vscale x 8 x i32> %va, i32 z
 ret <vscale x 8 x i32> %v
 }
 
+declare <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32>, <vscale x 10 x i32>, <vscale x 10 x i1>, i32)
+
+define <vscale x 10 x i32> @vor_vv_nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_nxv10i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 %evl)
+ ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vv_nxv10i32_unmasked(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vv_nxv10i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vor.vv v8, v8, v16
+; CHECK-NEXT: ret
+ %head = insertelement <vscale x 10 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> undef, <vscale x 10 x i32> zeroinitializer
+ %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 %evl)
+ ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vx_nxv10i32(<vscale x 10 x i32> %va, i32 %b, <vscale x 10 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_nxv10i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 10 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+ %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+ ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vx_nxv10i32_unmasked(<vscale x 10 x i32> %va, i32 %b, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vx_nxv10i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vor.vx v8, v8, a0
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 10 x i32> undef, i32 %b, i32 0
+ %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+ %head = insertelement <vscale x 10 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> undef, <vscale x 10 x i32> zeroinitializer
+ %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+ ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vi_nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_nxv10i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 10 x i32> undef, i32 5, i32 0
+ %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+ %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+ ret <vscale x 10 x i32> %v
+}
+
+define <vscale x 10 x i32> @vor_vi_nxv10i32_unmasked(<vscale x 10 x i32> %va, i32 zeroext %evl) {
+; CHECK-LABEL: vor_vi_nxv10i32_unmasked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vor.vi v8, v8, 5
+; CHECK-NEXT: ret
+ %elt.head = insertelement <vscale x 10 x i32> undef, i32 5, i32 0
+ %vb = shufflevector <vscale x 10 x i32> %elt.head, <vscale x 10 x i32> undef, <vscale x 10 x i32> zeroinitializer
+ %head = insertelement <vscale x 10 x i1> undef, i1 true, i32 0
+ %m = shufflevector <vscale x 10 x i1> %head, <vscale x 10 x i1> undef, <vscale x 10 x i32> zeroinitializer
+ %v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %vb, <vscale x 10 x i1> %m, i32 %evl)
+ ret <vscale x 10 x i32> %v
+}
+
 declare <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>, <vscale x 16 x i1>, i32)
 
 define <vscale x 16 x i32> @vor_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index 0bb7504e9436..89f2b7d2b686 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vrem_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vrem_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vrem_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
index f6e0cb71c241..e7a3b33f3ba1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vremu-vp.ll
@@ -104,6 +104,18 @@ define <vscale x 2 x i8> @vremu_vx_nxv2i8_unmasked(<vscale x 2 x i8> %va, i8 %b,
 ret <vscale x 2 x i8> %v
 }
 
+declare <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8>, <vscale x 3 x i8>, <vscale x 3 x i1>, i32)
+
+define <vscale x 3 x i8> @vremu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vremu_vv_nxv3i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
+ ret <vscale x 3 x i8> %v
+}
+
 declare <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
 
 define <vscale x 4 x i8> @vremu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
index 1aca9277d9a0..061a2f44ead8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
@@ -232,6 +232,18 @@ define <vscale x 4 x i8> @vsll_vi_nxv4i8_unmasked(<vscale x 4 x i8> %va, i32 zer
 ret <vscale x 4 x i8> %v
 }
 
+declare <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)
+
+define <vscale x 5 x i8> @vsll_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsll_vv_nxv5i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
+ ret <vscale x 5 x i8> %v
+}
+
 declare <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i8> @vsll_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
index 3ddd25937273..2ff5d07860f2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
@@ -1636,6 +1636,18 @@ define <vscale x 4 x i64> @vsra_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ret <vscale x 4 x i64> %v
 }
 
+declare <vscale x 6 x i64> @llvm.vp.ashr.nxv6i64(<vscale x 6 x i64>, <vscale x 6 x i64>, <vscale x 6 x i1>, i32)
+
+define <vscale x 6 x i64> @vsra_vv_nxv6i64(<vscale x 6 x i64> %va, <vscale x 6 x i64> %b, <vscale x 6 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsra_vv_nxv6i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 6 x i64> @llvm.vp.ashr.nxv6i64(<vscale x 6 x i64> %va, <vscale x 6 x i64> %b, <vscale x 6 x i1> %m, i32 %evl)
+ ret <vscale x 6 x i64> %v
+}
+
 declare <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
index f42b5798e132..b58873b452b6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
@@ -1636,6 +1636,18 @@ define <vscale x 4 x i64> @vsrl_vi_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
 ret <vscale x 4 x i64> %v
 }
 
+declare <vscale x 5 x i64> @llvm.vp.lshr.nxv5i64(<vscale x 5 x i64>, <vscale x 5 x i64>, <vscale x 5 x i1>, i32)
+
+define <vscale x 5 x i64> @vsrl_vv_nxv5i64(<vscale x 5 x i64> %va, <vscale x 5 x i64> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vsrl_vv_nxv5i64:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
+; CHECK-NEXT: ret
+ %v = call <vscale x 5 x i64> @llvm.vp.lshr.nxv5i64(<vscale x 5 x i64> %va, <vscale x 5 x i64> %b, <vscale x 5 x i1> %m, i32 %evl)
+ ret <vscale x 5 x i64> %v
+}
+
 declare <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
 
 define <vscale x 8 x i64> @vsrl_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
index a0b7b7de0227..94e697f97cd9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsub-vp.ll
@@ -154,6 +154,56 @@ define <vscale x 4 x i8> @vsub_vx_nxv4i8_unmasked(<vscale x 4 x i8> %va, i8 %b,
 ret <vscale x 4 x i8> %v
 }
 
+declare <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8>, <vscale x 5 x i8>, <vscale x 5 x i1>, i32)
+ +define @vsub_vv_nxv5i8( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vv_nxv5i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.sub.nxv5i8( %va, %b, %m, i32 %evl) + ret %v +} + +define @vsub_vv_nxv5i8_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vv_nxv5i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.sub.nxv5i8( %va, %b, %m, i32 %evl) + ret %v +} + +define @vsub_vx_nxv5i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vx_nxv5i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.sub.nxv5i8( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vsub_vx_nxv5i8_unmasked( %va, i8 %b, i32 zeroext %evl) { +; CHECK-LABEL: vsub_vx_nxv5i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu +; CHECK-NEXT: vsub.vx v8, v8, a0 +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.sub.nxv5i8( %va, %vb, %m, i32 %evl) + ret %v +} + declare @llvm.vp.sub.nxv8i8(, , , i32) define @vsub_vv_nxv8i8( %va, %b, %m, i32 zeroext %evl) { diff --git a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll index a9756058b8c0..cfd5bd116c6c 100644 --- a/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vxor-vp.ll @@ -412,6 +412,108 @@ define @vxor_vi_nxv8i8_unmasked_1( %va, i32 z ret %v } +declare @llvm.vp.xor.nxv15i8(, , , i32) + +define @vxor_vv_nxv15i8( %va, %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vv_nxv15i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.xor.nxv15i8( %va, %b, %m, i32 %evl) + ret %v +} + +define @vxor_vv_nxv15i8_unmasked( %va, %b, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vv_nxv15i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vxor.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.xor.nxv15i8( %va, %b, %m, i32 %evl) + ret %v +} + +define @vxor_vx_nxv15i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vx_nxv15i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.xor.nxv15i8( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vxor_vx_nxv15i8_unmasked( %va, i8 %b, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vx_nxv15i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu +; CHECK-NEXT: vxor.vx v8, v8, a0 +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 %b, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call 
@llvm.vp.xor.nxv15i8( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vxor_vi_nxv15i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vi_nxv15i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 7, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.xor.nxv15i8( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vxor_vi_nxv15i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vi_nxv15i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vxor.vi v8, v8, 7 +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 7, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.xor.nxv15i8( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vxor_vi_nxv15i8_1( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vi_nxv15i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vnot.v v8, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 -1, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %v = call @llvm.vp.xor.nxv15i8( %va, %vb, %m, i32 %evl) + ret %v +} + +define @vxor_vi_nxv15i8_unmasked_1( %va, i32 zeroext %evl) { +; CHECK-LABEL: vxor_vi_nxv15i8_unmasked_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vxor.vi v8, v8, -1 +; CHECK-NEXT: ret + %elt.head = insertelement undef, i8 -1, i32 0 + %vb = shufflevector %elt.head, undef, zeroinitializer + %head = insertelement undef, i1 true, i32 0 + %m = shufflevector %head, undef, zeroinitializer + %v = call @llvm.vp.xor.nxv15i8( %va, %vb, %m, i32 %evl) + ret %v +} + declare @llvm.vp.xor.nxv16i8(, , , i32) define @vxor_vv_nxv16i8( %va, %b, %m, i32 zeroext %evl) {