commit 2b5e53111a (parent 846a530e7d), in a fork of OSchip/llvm-project

[RISCV] Add support for matching vwmul(u) and vwmacc(u) from fixed vectors.

This adds a DAG combine to detect sext/zext inputs and emit a new ISD
opcode. The extends will either be removed or replaced with narrower
extends. Isel patterns are used to match add and widening mul to vwmacc,
similar to the recently added vmacc patterns. There's still some work to
be done to match vwmulsu. We should also rewrite splats that were
extended as scalars and then splatted.

Reviewed By: arcbbb

Differential Revision: https://reviews.llvm.org/D104802
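At the DAG level the combine performs roughly the following rewrite; this is a sketch of the node forms involved (the Mask/VL equality and single-use checks done by the code below are elided):

    (mul_vl (vsext_vl X, Mask, VL), (vsext_vl Y, Mask, VL), Mask, VL)
      --> (vwmul_vl X', Y', Mask, VL)    ; X', Y' re-extended to half the
                                         ; result element width if needed
    (mul_vl (vzext_vl X, Mask, VL), (vzext_vl Y, Mask, VL), Mask, VL)
      --> (vwmulu_vl X', Y', Mask, VL)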
@@ -6140,6 +6140,47 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
    }
    break;
  }
  case RISCVISD::MUL_VL: {
    // Try to form VWMUL or VWMULU.
    // FIXME: Look for splat of extended scalar as well.
    // FIXME: Support VWMULSU.
    SDValue Op0 = N->getOperand(0);
    SDValue Op1 = N->getOperand(1);
    bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
    bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
    if ((!IsSignExt && !IsZeroExt) || Op0.getOpcode() != Op1.getOpcode())
      return SDValue();

    // Make sure the extends have a single use.
    if (!Op0.hasOneUse() || !Op1.hasOneUse())
      return SDValue();

    SDValue Mask = N->getOperand(2);
    SDValue VL = N->getOperand(3);
    if (Op0.getOperand(1) != Mask || Op1.getOperand(1) != Mask ||
        Op0.getOperand(2) != VL || Op1.getOperand(2) != VL)
      return SDValue();

    Op0 = Op0.getOperand(0);
    Op1 = Op1.getOperand(0);

    MVT VT = N->getSimpleValueType(0);
    MVT NarrowVT =
        MVT::getVectorVT(MVT::getIntegerVT(VT.getScalarSizeInBits() / 2),
                         VT.getVectorElementCount());

    SDLoc DL(N);

    // Re-introduce narrower extends if needed.
    unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
    if (Op0.getValueType() != NarrowVT)
      Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
    if (Op1.getValueType() != NarrowVT)
      Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);

    unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
    return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
  }
  }

  return SDValue();
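As a concrete example of the type computation above: for a v4i32 MUL_VL fed by VSEXT_VL operands, NarrowVT is v4i16 (half the scalar width, same element count); if the pre-extend sources are v4i8 they are re-extended to v4i16, and the resulting VWMUL_VL widens v4i16 x v4i16 to v4i32, matching vwmul's SEW to 2*SEW semantics.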
@@ -8199,6 +8240,8 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  NODE_NAME_CASE(UINT_TO_FP_VL)
  NODE_NAME_CASE(FP_EXTEND_VL)
  NODE_NAME_CASE(FP_ROUND_VL)
  NODE_NAME_CASE(VWMUL_VL)
  NODE_NAME_CASE(VWMULU_VL)
  NODE_NAME_CASE(SETCC_VL)
  NODE_NAME_CASE(VSELECT_VL)
  NODE_NAME_CASE(VMAND_VL)
@@ -216,6 +216,10 @@ enum NodeType : unsigned {
  FP_ROUND_VL,
  FP_EXTEND_VL,

  // Widening instructions
  VWMUL_VL,
  VWMULU_VL,

  // Vector compare producing a mask. Fourth operand is input mask. Fifth
  // operand is VL.
  SETCC_VL,

@@ -241,6 +245,7 @@ enum NodeType : unsigned {
  // Vector sign/zero extend with additional mask & VL operands.
  VSEXT_VL,
  VZEXT_VL,

  // vpopc.m with additional mask and VL operands.
  VPOPC_VL,
@@ -215,6 +215,15 @@ def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
                                               SDTCVecEltisVT<2, i1>,
                                               SDTCisVT<3, XLenVT>]>>;

def SDT_RISCVVWMUL_VL : SDTypeProfile<1, 4, [SDTCisVec<0>,
                                             SDTCisSameNumEltsAs<0, 1>,
                                             SDTCisSameAs<1, 2>,
                                             SDTCisSameNumEltsAs<1, 3>,
                                             SDTCVecEltisVT<3, i1>,
                                             SDTCisVT<4, XLenVT>]>;
def riscv_vwmul_vl : SDNode<"RISCVISD::VWMUL_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;
def riscv_vwmulu_vl : SDNode<"RISCVISD::VWMULU_VL", SDT_RISCVVWMUL_VL, [SDNPCommutative]>;

def SDTRVVVecReduce : SDTypeProfile<1, 4, [
  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCVecEltisVT<3, i1>,
  SDTCisSameNumEltsAs<1, 3>, SDTCisVT<4, XLenVT>

@@ -226,6 +235,18 @@ def riscv_mul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
  return N->hasOneUse();
}]>;

def riscv_vwmul_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                    (riscv_vwmul_vl node:$A, node:$B, node:$C,
                                                    node:$D), [{
  return N->hasOneUse();
}]>;

def riscv_vwmulu_vl_oneuse : PatFrag<(ops node:$A, node:$B, node:$C, node:$D),
                                     (riscv_vwmulu_vl node:$A, node:$B, node:$C,
                                                      node:$D), [{
  return N->hasOneUse();
}]>;

foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
                "FADD", "SEQ_FADD", "FMIN", "FMAX"] in
  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
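The new _oneuse PatFrags follow riscv_mul_vl_oneuse directly above: a widening multiply is only folded into a multiply-add when the add is its sole user, since otherwise the multiply would still have to be materialized on its own.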
@@ -326,6 +347,20 @@ multiclass VPatBinaryVL_VV_VX_VI<SDNode vop, string instruction_name,
  }
}

multiclass VPatBinaryWVL_VV_VX<SDNode vop, string instruction_name> {
  foreach VtiToWti = AllWidenableIntVectors in {
    defvar vti = VtiToWti.Vti;
    defvar wti = VtiToWti.Wti;
    defm : VPatBinaryVL_VV<vop, instruction_name,
                           wti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, wti.RegClass, vti.RegClass>;
    defm : VPatBinaryVL_XI<vop, instruction_name, "VX",
                           wti.Vector, vti.Vector, vti.Mask, vti.Log2SEW,
                           vti.LMul, wti.RegClass, vti.RegClass,
                           SplatPat, GPR>;
  }
}

class VPatBinaryVL_VF<SDNode vop,
                      string instruction_name,
                      ValueType result_type,
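Unlike VPatBinaryVL_VV_VX, this widening variant iterates over AllWidenableIntVectors and instantiates each pattern with the wide type (wti) for the result register class and the narrow type (vti) for the operands, mask, and SEW.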
@@ -737,6 +772,10 @@ defm : VPatBinaryVL_VV_VX<riscv_sdiv_vl, "PseudoVDIV">;
defm : VPatBinaryVL_VV_VX<riscv_urem_vl, "PseudoVREMU">;
defm : VPatBinaryVL_VV_VX<riscv_srem_vl, "PseudoVREM">;

// 12.12. Vector Widening Integer Multiply Instructions
defm : VPatBinaryWVL_VV_VX<riscv_vwmul_vl, "PseudoVWMUL">;
defm : VPatBinaryWVL_VV_VX<riscv_vwmulu_vl, "PseudoVWMULU">;

// 12.13 Vector Single-Width Integer Multiply-Add Instructions
foreach vti = AllIntegerVectors in {
  // NOTE: We choose VMADD because it has the most commuting freedom. So it
@@ -784,6 +823,49 @@ foreach vti = AllIntegerVectors in {
            GPR:$vl, vti.Log2SEW)>;
}

// 12.14. Vector Widening Integer Multiply-Add Instructions
foreach vtiTowti = AllWidenableIntVectors in {
  defvar vti = vtiTowti.Vti;
  defvar wti = vtiTowti.Wti;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmul_vl_oneuse vti.RegClass:$rs1,
                                                  (vti.Vector vti.RegClass:$rs2),
                                                  (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACC_VV_"# vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW)>;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmulu_vl_oneuse vti.RegClass:$rs1,
                                                   (vti.Vector vti.RegClass:$rs2),
                                                   (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCU_VV_"# vti.LMul.MX)
                wti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW)>;

  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmul_vl_oneuse (SplatPat XLenVT:$rs1),
                                                  (vti.Vector vti.RegClass:$rs2),
                                                  (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACC_VX_" # vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW)>;
  def : Pat<(wti.Vector
             (riscv_add_vl wti.RegClass:$rd,
                           (riscv_vwmulu_vl_oneuse (SplatPat XLenVT:$rs1),
                                                   (vti.Vector vti.RegClass:$rs2),
                                                   (vti.Mask true_mask), VLOpFrag),
                           (vti.Mask true_mask), VLOpFrag)),
            (!cast<Instruction>("PseudoVWMACCU_VX_" # vti.LMul.MX)
                wti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
                GPR:$vl, vti.Log2SEW)>;
}

// 12.15. Vector Integer Merge Instructions
foreach vti = AllIntegerVectors in {
  def : Pat<(vti.Vector (riscv_vselect_vl (vti.Mask VMV0:$vm),
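These patterns fold a riscv_add_vl of the accumulator in $rd with a single-use widening multiply into the tied-destination vwmacc/vwmaccu pseudos, in both VV and VX (splat) forms. Because the pseudo both reads and writes $rd, the tests below select a tail-undisturbed (tu) vsetvli before the multiply-add, in contrast to the plain vwmul tests at the end.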
@@ -0,0 +1,549 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

define <2 x i16> @vwmacc_v2i16(<2 x i8>* %x, <2 x i8>* %y, <2 x i16> %z) {
; CHECK-LABEL: vwmacc_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = load <2 x i8>, <2 x i8>* %y
  %c = sext <2 x i8> %a to <2 x i16>
  %d = sext <2 x i8> %b to <2 x i16>
  %e = mul <2 x i16> %c, %d
  %f = add <2 x i16> %e, %z
  ret <2 x i16> %f
}

define <4 x i16> @vwmacc_v4i16(<4 x i8>* %x, <4 x i8>* %y, <4 x i16> %z) {
; CHECK-LABEL: vwmacc_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = load <4 x i8>, <4 x i8>* %y
  %c = sext <4 x i8> %a to <4 x i16>
  %d = sext <4 x i8> %b to <4 x i16>
  %e = mul <4 x i16> %c, %d
  %f = add <4 x i16> %e, %z
  ret <4 x i16> %f
}

define <2 x i32> @vwmacc_v2i32(<2 x i16>* %x, <2 x i16>* %y, <2 x i32> %z) {
; CHECK-LABEL: vwmacc_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = load <2 x i16>, <2 x i16>* %y
  %c = sext <2 x i16> %a to <2 x i32>
  %d = sext <2 x i16> %b to <2 x i32>
  %e = mul <2 x i32> %c, %d
  %f = add <2 x i32> %e, %z
  ret <2 x i32> %f
}

define <8 x i16> @vwmacc_v8i16(<8 x i8>* %x, <8 x i8>* %y, <8 x i16> %z) {
; CHECK-LABEL: vwmacc_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = load <8 x i8>, <8 x i8>* %y
  %c = sext <8 x i8> %a to <8 x i16>
  %d = sext <8 x i8> %b to <8 x i16>
  %e = mul <8 x i16> %c, %d
  %f = add <8 x i16> %e, %z
  ret <8 x i16> %f
}

define <4 x i32> @vwmacc_v4i32(<4 x i16>* %x, <4 x i16>* %y, <4 x i32> %z) {
; CHECK-LABEL: vwmacc_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = load <4 x i16>, <4 x i16>* %y
  %c = sext <4 x i16> %a to <4 x i32>
  %d = sext <4 x i16> %b to <4 x i32>
  %e = mul <4 x i32> %c, %d
  %f = add <4 x i32> %e, %z
  ret <4 x i32> %f
}

define <2 x i64> @vwmacc_v2i64(<2 x i32>* %x, <2 x i32>* %y, <2 x i64> %z) {
; CHECK-LABEL: vwmacc_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = load <2 x i32>, <2 x i32>* %y
  %c = sext <2 x i32> %a to <2 x i64>
  %d = sext <2 x i32> %b to <2 x i64>
  %e = mul <2 x i64> %c, %d
  %f = add <2 x i64> %e, %z
  ret <2 x i64> %f
}

define <16 x i16> @vwmacc_v16i16(<16 x i8>* %x, <16 x i8>* %y, <16 x i16> %z) {
; CHECK-LABEL: vwmacc_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = load <16 x i8>, <16 x i8>* %y
  %c = sext <16 x i8> %a to <16 x i16>
  %d = sext <16 x i8> %b to <16 x i16>
  %e = mul <16 x i16> %c, %d
  %f = add <16 x i16> %e, %z
  ret <16 x i16> %f
}

define <8 x i32> @vwmacc_v8i32(<8 x i16>* %x, <8 x i16>* %y, <8 x i32> %z) {
; CHECK-LABEL: vwmacc_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = load <8 x i16>, <8 x i16>* %y
  %c = sext <8 x i16> %a to <8 x i32>
  %d = sext <8 x i16> %b to <8 x i32>
  %e = mul <8 x i32> %c, %d
  %f = add <8 x i32> %e, %z
  ret <8 x i32> %f
}

define <4 x i64> @vwmacc_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) {
; CHECK-LABEL: vwmacc_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = load <4 x i32>, <4 x i32>* %y
  %c = sext <4 x i32> %a to <4 x i64>
  %d = sext <4 x i32> %b to <4 x i64>
  %e = mul <4 x i64> %c, %d
  %f = add <4 x i64> %e, %z
  ret <4 x i64> %f
}

define <32 x i16> @vwmacc_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) {
; CHECK-LABEL: vwmacc_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vle8.v v28, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = load <32 x i8>, <32 x i8>* %y
  %c = sext <32 x i8> %a to <32 x i16>
  %d = sext <32 x i8> %b to <32 x i16>
  %e = mul <32 x i16> %c, %d
  %f = add <32 x i16> %e, %z
  ret <32 x i16> %f
}

define <16 x i32> @vwmacc_v16i32(<16 x i16>* %x, <16 x i16>* %y, <16 x i32> %z) {
; CHECK-LABEL: vwmacc_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vle16.v v28, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = load <16 x i16>, <16 x i16>* %y
  %c = sext <16 x i16> %a to <16 x i32>
  %d = sext <16 x i16> %b to <16 x i32>
  %e = mul <16 x i32> %c, %d
  %f = add <16 x i32> %e, %z
  ret <16 x i32> %f
}

define <8 x i64> @vwmacc_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) {
; CHECK-LABEL: vwmacc_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vle32.v v28, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = load <8 x i32>, <8 x i32>* %y
  %c = sext <8 x i32> %a to <8 x i64>
  %d = sext <8 x i32> %b to <8 x i64>
  %e = mul <8 x i64> %c, %d
  %f = add <8 x i64> %e, %z
  ret <8 x i64> %f
}

define <64 x i16> @vwmacc_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
; CHECK-LABEL: vwmacc_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = load <64 x i8>, <64 x i8>* %y
  %c = sext <64 x i8> %a to <64 x i16>
  %d = sext <64 x i8> %b to <64 x i16>
  %e = mul <64 x i16> %c, %d
  %f = add <64 x i16> %e, %z
  ret <64 x i16> %f
}

define <32 x i32> @vwmacc_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) {
; CHECK-LABEL: vwmacc_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vle16.v v16, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = load <32 x i16>, <32 x i16>* %y
  %c = sext <32 x i16> %a to <32 x i32>
  %d = sext <32 x i16> %b to <32 x i32>
  %e = mul <32 x i32> %c, %d
  %f = add <32 x i32> %e, %z
  ret <32 x i32> %f
}

define <16 x i64> @vwmacc_v16i64(<16 x i32>* %x, <16 x i32>* %y, <16 x i64> %z) {
; CHECK-LABEL: vwmacc_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT:    vwmacc.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = load <16 x i32>, <16 x i32>* %y
  %c = sext <16 x i32> %a to <16 x i64>
  %d = sext <16 x i32> %b to <16 x i64>
  %e = mul <16 x i64> %c, %d
  %f = add <16 x i64> %e, %z
  ret <16 x i64> %f
}

define <2 x i16> @vwmacc_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
; CHECK-LABEL: vwmacc_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = insertelement <2 x i8> undef, i8 %y, i32 0
  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
  %d = sext <2 x i8> %a to <2 x i16>
  %e = sext <2 x i8> %c to <2 x i16>
  %f = mul <2 x i16> %d, %e
  %g = add <2 x i16> %f, %z
  ret <2 x i16> %g
}

define <4 x i16> @vwmacc_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
; CHECK-LABEL: vwmacc_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = insertelement <4 x i8> undef, i8 %y, i32 0
  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
  %d = sext <4 x i8> %a to <4 x i16>
  %e = sext <4 x i8> %c to <4 x i16>
  %f = mul <4 x i16> %d, %e
  %g = add <4 x i16> %f, %z
  ret <4 x i16> %g
}

define <2 x i32> @vwmacc_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
; CHECK-LABEL: vwmacc_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = insertelement <2 x i16> undef, i16 %y, i32 0
  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
  %d = sext <2 x i16> %a to <2 x i32>
  %e = sext <2 x i16> %c to <2 x i32>
  %f = mul <2 x i32> %d, %e
  %g = add <2 x i32> %f, %z
  ret <2 x i32> %g
}

define <8 x i16> @vwmacc_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
; CHECK-LABEL: vwmacc_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = insertelement <8 x i8> undef, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
  %d = sext <8 x i8> %a to <8 x i16>
  %e = sext <8 x i8> %c to <8 x i16>
  %f = mul <8 x i16> %d, %e
  %g = add <8 x i16> %f, %z
  ret <8 x i16> %g
}

define <4 x i32> @vwmacc_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
; CHECK-LABEL: vwmacc_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = insertelement <4 x i16> undef, i16 %y, i32 0
  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
  %d = sext <4 x i16> %a to <4 x i32>
  %e = sext <4 x i16> %c to <4 x i32>
  %f = mul <4 x i32> %d, %e
  %g = add <4 x i32> %f, %z
  ret <4 x i32> %g
}

define <2 x i64> @vwmacc_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
; CHECK-LABEL: vwmacc_vx_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = insertelement <2 x i32> undef, i32 %y, i64 0
  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
  %d = sext <2 x i32> %a to <2 x i64>
  %e = sext <2 x i32> %c to <2 x i64>
  %f = mul <2 x i64> %d, %e
  %g = add <2 x i64> %f, %z
  ret <2 x i64> %g
}

define <16 x i16> @vwmacc_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
; CHECK-LABEL: vwmacc_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = insertelement <16 x i8> undef, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %d = sext <16 x i8> %a to <16 x i16>
  %e = sext <16 x i8> %c to <16 x i16>
  %f = mul <16 x i16> %d, %e
  %g = add <16 x i16> %f, %z
  ret <16 x i16> %g
}

define <8 x i32> @vwmacc_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
; CHECK-LABEL: vwmacc_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = insertelement <8 x i16> undef, i16 %y, i32 0
  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %d = sext <8 x i16> %a to <8 x i32>
  %e = sext <8 x i16> %c to <8 x i32>
  %f = mul <8 x i32> %d, %e
  %g = add <8 x i32> %f, %z
  ret <8 x i32> %g
}

define <4 x i64> @vwmacc_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
; CHECK-LABEL: vwmacc_vx_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = insertelement <4 x i32> undef, i32 %y, i64 0
  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %d = sext <4 x i32> %a to <4 x i64>
  %e = sext <4 x i32> %c to <4 x i64>
  %f = mul <4 x i64> %d, %e
  %g = add <4 x i64> %f, %z
  ret <4 x i64> %g
}

define <32 x i16> @vwmacc_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
; CHECK-LABEL: vwmacc_vx_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v26
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = insertelement <32 x i8> undef, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %d = sext <32 x i8> %a to <32 x i16>
  %e = sext <32 x i8> %c to <32 x i16>
  %f = mul <32 x i16> %d, %e
  %g = add <32 x i16> %f, %z
  ret <32 x i16> %g
}

define <16 x i32> @vwmacc_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
; CHECK-LABEL: vwmacc_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v26
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = insertelement <16 x i16> undef, i16 %y, i32 0
  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
  %d = sext <16 x i16> %a to <16 x i32>
  %e = sext <16 x i16> %c to <16 x i32>
  %f = mul <16 x i32> %d, %e
  %g = add <16 x i32> %f, %z
  ret <16 x i32> %g
}

define <8 x i64> @vwmacc_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
; CHECK-LABEL: vwmacc_vx_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v26
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = insertelement <8 x i32> undef, i32 %y, i64 0
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
  %d = sext <8 x i32> %a to <8 x i64>
  %e = sext <8 x i32> %c to <8 x i64>
  %f = mul <8 x i64> %d, %e
  %g = add <8 x i64> %f, %z
  ret <8 x i64> %g
}

define <64 x i16> @vwmacc_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
; CHECK-LABEL: vwmacc_vx_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v28
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = insertelement <64 x i8> undef, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %d = sext <64 x i8> %a to <64 x i16>
  %e = sext <64 x i8> %c to <64 x i16>
  %f = mul <64 x i16> %d, %e
  %g = add <64 x i16> %f, %z
  ret <64 x i16> %g
}

define <32 x i32> @vwmacc_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
; CHECK-LABEL: vwmacc_vx_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v28
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = insertelement <32 x i16> undef, i16 %y, i32 0
  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %d = sext <32 x i16> %a to <32 x i32>
  %e = sext <32 x i16> %c to <32 x i32>
  %f = mul <32 x i32> %d, %e
  %g = add <32 x i32> %f, %z
  ret <32 x i32> %g
}

define <16 x i64> @vwmacc_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
; CHECK-LABEL: vwmacc_vx_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT:    vwmacc.vx v8, a1, v28
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = insertelement <16 x i32> undef, i32 %y, i64 0
  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %d = sext <16 x i32> %a to <16 x i64>
  %e = sext <16 x i32> %c to <16 x i64>
  %f = mul <16 x i64> %d, %e
  %g = add <16 x i64> %f, %z
  ret <16 x i64> %g
}
@@ -0,0 +1,549 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

define <2 x i16> @vwmaccu_v2i16(<2 x i8>* %x, <2 x i8>* %y, <2 x i16> %z) {
; CHECK-LABEL: vwmaccu_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = load <2 x i8>, <2 x i8>* %y
  %c = zext <2 x i8> %a to <2 x i16>
  %d = zext <2 x i8> %b to <2 x i16>
  %e = mul <2 x i16> %c, %d
  %f = add <2 x i16> %e, %z
  ret <2 x i16> %f
}

define <4 x i16> @vwmaccu_v4i16(<4 x i8>* %x, <4 x i8>* %y, <4 x i16> %z) {
; CHECK-LABEL: vwmaccu_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = load <4 x i8>, <4 x i8>* %y
  %c = zext <4 x i8> %a to <4 x i16>
  %d = zext <4 x i8> %b to <4 x i16>
  %e = mul <4 x i16> %c, %d
  %f = add <4 x i16> %e, %z
  ret <4 x i16> %f
}

define <2 x i32> @vwmaccu_v2i32(<2 x i16>* %x, <2 x i16>* %y, <2 x i32> %z) {
; CHECK-LABEL: vwmaccu_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = load <2 x i16>, <2 x i16>* %y
  %c = zext <2 x i16> %a to <2 x i32>
  %d = zext <2 x i16> %b to <2 x i32>
  %e = mul <2 x i32> %c, %d
  %f = add <2 x i32> %e, %z
  ret <2 x i32> %f
}

define <8 x i16> @vwmaccu_v8i16(<8 x i8>* %x, <8 x i8>* %y, <8 x i16> %z) {
; CHECK-LABEL: vwmaccu_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = load <8 x i8>, <8 x i8>* %y
  %c = zext <8 x i8> %a to <8 x i16>
  %d = zext <8 x i8> %b to <8 x i16>
  %e = mul <8 x i16> %c, %d
  %f = add <8 x i16> %e, %z
  ret <8 x i16> %f
}

define <4 x i32> @vwmaccu_v4i32(<4 x i16>* %x, <4 x i16>* %y, <4 x i32> %z) {
; CHECK-LABEL: vwmaccu_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = load <4 x i16>, <4 x i16>* %y
  %c = zext <4 x i16> %a to <4 x i32>
  %d = zext <4 x i16> %b to <4 x i32>
  %e = mul <4 x i32> %c, %d
  %f = add <4 x i32> %e, %z
  ret <4 x i32> %f
}

define <2 x i64> @vwmaccu_v2i64(<2 x i32>* %x, <2 x i32>* %y, <2 x i64> %z) {
; CHECK-LABEL: vwmaccu_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = load <2 x i32>, <2 x i32>* %y
  %c = zext <2 x i32> %a to <2 x i64>
  %d = zext <2 x i32> %b to <2 x i64>
  %e = mul <2 x i64> %c, %d
  %f = add <2 x i64> %e, %z
  ret <2 x i64> %f
}

define <16 x i16> @vwmaccu_v16i16(<16 x i8>* %x, <16 x i8>* %y, <16 x i16> %z) {
; CHECK-LABEL: vwmaccu_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = load <16 x i8>, <16 x i8>* %y
  %c = zext <16 x i8> %a to <16 x i16>
  %d = zext <16 x i8> %b to <16 x i16>
  %e = mul <16 x i16> %c, %d
  %f = add <16 x i16> %e, %z
  ret <16 x i16> %f
}

define <8 x i32> @vwmaccu_v8i32(<8 x i16>* %x, <8 x i16>* %y, <8 x i32> %z) {
; CHECK-LABEL: vwmaccu_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = load <8 x i16>, <8 x i16>* %y
  %c = zext <8 x i16> %a to <8 x i32>
  %d = zext <8 x i16> %b to <8 x i32>
  %e = mul <8 x i32> %c, %d
  %f = add <8 x i32> %e, %z
  ret <8 x i32> %f
}

define <4 x i64> @vwmaccu_v4i64(<4 x i32>* %x, <4 x i32>* %y, <4 x i64> %z) {
; CHECK-LABEL: vwmaccu_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = load <4 x i32>, <4 x i32>* %y
  %c = zext <4 x i32> %a to <4 x i64>
  %d = zext <4 x i32> %b to <4 x i64>
  %e = mul <4 x i64> %c, %d
  %f = add <4 x i64> %e, %z
  ret <4 x i64> %f
}

define <32 x i16> @vwmaccu_v32i16(<32 x i8>* %x, <32 x i8>* %y, <32 x i16> %z) {
; CHECK-LABEL: vwmaccu_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vle8.v v28, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = load <32 x i8>, <32 x i8>* %y
  %c = zext <32 x i8> %a to <32 x i16>
  %d = zext <32 x i8> %b to <32 x i16>
  %e = mul <32 x i16> %c, %d
  %f = add <32 x i16> %e, %z
  ret <32 x i16> %f
}

define <16 x i32> @vwmaccu_v16i32(<16 x i16>* %x, <16 x i16>* %y, <16 x i32> %z) {
; CHECK-LABEL: vwmaccu_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vle16.v v28, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = load <16 x i16>, <16 x i16>* %y
  %c = zext <16 x i16> %a to <16 x i32>
  %d = zext <16 x i16> %b to <16 x i32>
  %e = mul <16 x i32> %c, %d
  %f = add <16 x i32> %e, %z
  ret <16 x i32> %f
}

define <8 x i64> @vwmaccu_v8i64(<8 x i32>* %x, <8 x i32>* %y, <8 x i64> %z) {
; CHECK-LABEL: vwmaccu_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vle32.v v28, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = load <8 x i32>, <8 x i32>* %y
  %c = zext <8 x i32> %a to <8 x i64>
  %d = zext <8 x i32> %b to <8 x i64>
  %e = mul <8 x i64> %c, %d
  %f = add <8 x i64> %e, %z
  ret <8 x i64> %f
}

define <64 x i16> @vwmaccu_v64i16(<64 x i8>* %x, <64 x i8>* %y, <64 x i16> %z) {
; CHECK-LABEL: vwmaccu_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = load <64 x i8>, <64 x i8>* %y
  %c = zext <64 x i8> %a to <64 x i16>
  %d = zext <64 x i8> %b to <64 x i16>
  %e = mul <64 x i16> %c, %d
  %f = add <64 x i16> %e, %z
  ret <64 x i16> %f
}

define <32 x i32> @vwmaccu_v32i32(<32 x i16>* %x, <32 x i16>* %y, <32 x i32> %z) {
; CHECK-LABEL: vwmaccu_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vle16.v v16, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = load <32 x i16>, <32 x i16>* %y
  %c = zext <32 x i16> %a to <32 x i32>
  %d = zext <32 x i16> %b to <32 x i32>
  %e = mul <32 x i32> %c, %d
  %f = add <32 x i32> %e, %z
  ret <32 x i32> %f
}

define <16 x i64> @vwmaccu_v16i64(<16 x i32>* %x, <16 x i32>* %y, <16 x i64> %z) {
; CHECK-LABEL: vwmaccu_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = load <16 x i32>, <16 x i32>* %y
  %c = zext <16 x i32> %a to <16 x i64>
  %d = zext <16 x i32> %b to <16 x i64>
  %e = mul <16 x i64> %c, %d
  %f = add <16 x i64> %e, %z
  ret <16 x i64> %f
}

define <2 x i16> @vwmaccu_vx_v2i16(<2 x i8>* %x, i8 %y, <2 x i16> %z) {
; CHECK-LABEL: vwmaccu_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = insertelement <2 x i8> undef, i8 %y, i32 0
  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
  %d = zext <2 x i8> %a to <2 x i16>
  %e = zext <2 x i8> %c to <2 x i16>
  %f = mul <2 x i16> %d, %e
  %g = add <2 x i16> %f, %z
  ret <2 x i16> %g
}

define <4 x i16> @vwmaccu_vx_v4i16(<4 x i8>* %x, i8 %y, <4 x i16> %z) {
; CHECK-LABEL: vwmaccu_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = insertelement <4 x i8> undef, i8 %y, i32 0
  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
  %d = zext <4 x i8> %a to <4 x i16>
  %e = zext <4 x i8> %c to <4 x i16>
  %f = mul <4 x i16> %d, %e
  %g = add <4 x i16> %f, %z
  ret <4 x i16> %g
}

define <2 x i32> @vwmaccu_vx_v2i32(<2 x i16>* %x, i16 %y, <2 x i32> %z) {
; CHECK-LABEL: vwmaccu_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = insertelement <2 x i16> undef, i16 %y, i32 0
  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
  %d = zext <2 x i16> %a to <2 x i32>
  %e = zext <2 x i16> %c to <2 x i32>
  %f = mul <2 x i32> %d, %e
  %g = add <2 x i32> %f, %z
  ret <2 x i32> %g
}

define <8 x i16> @vwmaccu_vx_v8i16(<8 x i8>* %x, i8 %y, <8 x i16> %z) {
; CHECK-LABEL: vwmaccu_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = insertelement <8 x i8> undef, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
  %d = zext <8 x i8> %a to <8 x i16>
  %e = zext <8 x i8> %c to <8 x i16>
  %f = mul <8 x i16> %d, %e
  %g = add <8 x i16> %f, %z
  ret <8 x i16> %g
}

define <4 x i32> @vwmaccu_vx_v4i32(<4 x i16>* %x, i16 %y, <4 x i32> %z) {
; CHECK-LABEL: vwmaccu_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = insertelement <4 x i16> undef, i16 %y, i32 0
  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
  %d = zext <4 x i16> %a to <4 x i32>
  %e = zext <4 x i16> %c to <4 x i32>
  %f = mul <4 x i32> %d, %e
  %g = add <4 x i32> %f, %z
  ret <4 x i32> %g
}

define <2 x i64> @vwmaccu_vx_v2i64(<2 x i32>* %x, i32 %y, <2 x i64> %z) {
; CHECK-LABEL: vwmaccu_vx_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, mf2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = insertelement <2 x i32> undef, i32 %y, i64 0
  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
  %d = zext <2 x i32> %a to <2 x i64>
  %e = zext <2 x i32> %c to <2 x i64>
  %f = mul <2 x i64> %d, %e
  %g = add <2 x i64> %f, %z
  ret <2 x i64> %g
}

define <16 x i16> @vwmaccu_vx_v16i16(<16 x i8>* %x, i8 %y, <16 x i16> %z) {
; CHECK-LABEL: vwmaccu_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = insertelement <16 x i8> undef, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %d = zext <16 x i8> %a to <16 x i16>
  %e = zext <16 x i8> %c to <16 x i16>
  %f = mul <16 x i16> %d, %e
  %g = add <16 x i16> %f, %z
  ret <16 x i16> %g
}

define <8 x i32> @vwmaccu_vx_v8i32(<8 x i16>* %x, i16 %y, <8 x i32> %z) {
; CHECK-LABEL: vwmaccu_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = insertelement <8 x i16> undef, i16 %y, i32 0
  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %d = zext <8 x i16> %a to <8 x i32>
  %e = zext <8 x i16> %c to <8 x i32>
  %f = mul <8 x i32> %d, %e
  %g = add <8 x i32> %f, %z
  ret <8 x i32> %g
}

define <4 x i64> @vwmaccu_vx_v4i64(<4 x i32>* %x, i32 %y, <4 x i64> %z) {
; CHECK-LABEL: vwmaccu_vx_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v25
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = insertelement <4 x i32> undef, i32 %y, i64 0
  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %d = zext <4 x i32> %a to <4 x i64>
  %e = zext <4 x i32> %c to <4 x i64>
  %f = mul <4 x i64> %d, %e
  %g = add <4 x i64> %f, %z
  ret <4 x i64> %g
}

define <32 x i16> @vwmaccu_vx_v32i16(<32 x i8>* %x, i8 %y, <32 x i16> %z) {
; CHECK-LABEL: vwmaccu_vx_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v26
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = insertelement <32 x i8> undef, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %d = zext <32 x i8> %a to <32 x i16>
  %e = zext <32 x i8> %c to <32 x i16>
  %f = mul <32 x i16> %d, %e
  %g = add <32 x i16> %f, %z
  ret <32 x i16> %g
}

define <16 x i32> @vwmaccu_vx_v16i32(<16 x i16>* %x, i16 %y, <16 x i32> %z) {
; CHECK-LABEL: vwmaccu_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v26
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = insertelement <16 x i16> undef, i16 %y, i32 0
  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
  %d = zext <16 x i16> %a to <16 x i32>
  %e = zext <16 x i16> %c to <16 x i32>
  %f = mul <16 x i32> %d, %e
  %g = add <16 x i32> %f, %z
  ret <16 x i32> %g
}

define <8 x i64> @vwmaccu_vx_v8i64(<8 x i32>* %x, i32 %y, <8 x i64> %z) {
; CHECK-LABEL: vwmaccu_vx_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, m2, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v26
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = insertelement <8 x i32> undef, i32 %y, i64 0
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
  %d = zext <8 x i32> %a to <8 x i64>
  %e = zext <8 x i32> %c to <8 x i64>
  %f = mul <8 x i64> %d, %e
  %g = add <8 x i64> %f, %z
  ret <8 x i64> %g
}

define <64 x i16> @vwmaccu_vx_v64i16(<64 x i8>* %x, i8 %y, <64 x i16> %z) {
; CHECK-LABEL: vwmaccu_vx_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v28
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = insertelement <64 x i8> undef, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %d = zext <64 x i8> %a to <64 x i16>
  %e = zext <64 x i8> %c to <64 x i16>
  %f = mul <64 x i16> %d, %e
  %g = add <64 x i16> %f, %z
  ret <64 x i16> %g
}

define <32 x i32> @vwmaccu_vx_v32i32(<32 x i16>* %x, i16 %y, <32 x i32> %z) {
; CHECK-LABEL: vwmaccu_vx_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v28
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = insertelement <32 x i16> undef, i16 %y, i32 0
  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %d = zext <32 x i16> %a to <32 x i32>
  %e = zext <32 x i16> %c to <32 x i32>
  %f = mul <32 x i32> %d, %e
  %g = add <32 x i32> %f, %z
  ret <32 x i32> %g
}

define <16 x i64> @vwmaccu_vx_v16i64(<16 x i32>* %x, i32 %y, <16 x i64> %z) {
; CHECK-LABEL: vwmaccu_vx_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e32, m4, tu, mu
; CHECK-NEXT:    vwmaccu.vx v8, a1, v28
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = insertelement <16 x i32> undef, i32 %y, i64 0
  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %d = zext <16 x i32> %a to <16 x i64>
  %e = zext <16 x i32> %c to <16 x i64>
  %f = mul <16 x i64> %d, %e
  %g = add <16 x i64> %f, %z
  ret <16 x i64> %g
}
@@ -0,0 +1,653 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

define <2 x i16> @vwmul_v2i16(<2 x i8>* %x, <2 x i8>* %y) {
; CHECK-LABEL: vwmul_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = load <2 x i8>, <2 x i8>* %y
  %c = sext <2 x i8> %a to <2 x i16>
  %d = sext <2 x i8> %b to <2 x i16>
  %e = mul <2 x i16> %c, %d
  ret <2 x i16> %e
}

define <4 x i16> @vwmul_v4i16(<4 x i8>* %x, <4 x i8>* %y) {
; CHECK-LABEL: vwmul_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = load <4 x i8>, <4 x i8>* %y
  %c = sext <4 x i8> %a to <4 x i16>
  %d = sext <4 x i8> %b to <4 x i16>
  %e = mul <4 x i16> %c, %d
  ret <4 x i16> %e
}

define <2 x i32> @vwmul_v2i32(<2 x i16>* %x, <2 x i16>* %y) {
; CHECK-LABEL: vwmul_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = load <2 x i16>, <2 x i16>* %y
  %c = sext <2 x i16> %a to <2 x i32>
  %d = sext <2 x i16> %b to <2 x i32>
  %e = mul <2 x i32> %c, %d
  ret <2 x i32> %e
}

define <8 x i16> @vwmul_v8i16(<8 x i8>* %x, <8 x i8>* %y) {
; CHECK-LABEL: vwmul_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = load <8 x i8>, <8 x i8>* %y
  %c = sext <8 x i8> %a to <8 x i16>
  %d = sext <8 x i8> %b to <8 x i16>
  %e = mul <8 x i16> %c, %d
  ret <8 x i16> %e
}

define <4 x i32> @vwmul_v4i32(<4 x i16>* %x, <4 x i16>* %y) {
; CHECK-LABEL: vwmul_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = load <4 x i16>, <4 x i16>* %y
  %c = sext <4 x i16> %a to <4 x i32>
  %d = sext <4 x i16> %b to <4 x i32>
  %e = mul <4 x i32> %c, %d
  ret <4 x i32> %e
}

define <2 x i64> @vwmul_v2i64(<2 x i32>* %x, <2 x i32>* %y) {
; CHECK-LABEL: vwmul_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = load <2 x i32>, <2 x i32>* %y
  %c = sext <2 x i32> %a to <2 x i64>
  %d = sext <2 x i32> %b to <2 x i64>
  %e = mul <2 x i64> %c, %d
  ret <2 x i64> %e
}

define <16 x i16> @vwmul_v16i16(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: vwmul_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = load <16 x i8>, <16 x i8>* %y
  %c = sext <16 x i8> %a to <16 x i16>
  %d = sext <16 x i8> %b to <16 x i16>
  %e = mul <16 x i16> %c, %d
  ret <16 x i16> %e
}

define <8 x i32> @vwmul_v8i32(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: vwmul_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = load <8 x i16>, <8 x i16>* %y
  %c = sext <8 x i16> %a to <8 x i32>
  %d = sext <8 x i16> %b to <8 x i32>
  %e = mul <8 x i32> %c, %d
  ret <8 x i32> %e
}

define <4 x i64> @vwmul_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: vwmul_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vwmul.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = load <4 x i32>, <4 x i32>* %y
  %c = sext <4 x i32> %a to <4 x i64>
  %d = sext <4 x i32> %b to <4 x i64>
  %e = mul <4 x i64> %c, %d
  ret <4 x i64> %e
}

define <32 x i16> @vwmul_v32i16(<32 x i8>* %x, <32 x i8>* %y) {
; CHECK-LABEL: vwmul_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vle8.v v28, (a1)
; CHECK-NEXT:    vwmul.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = load <32 x i8>, <32 x i8>* %y
  %c = sext <32 x i8> %a to <32 x i16>
  %d = sext <32 x i8> %b to <32 x i16>
  %e = mul <32 x i16> %c, %d
  ret <32 x i16> %e
}

define <16 x i32> @vwmul_v16i32(<16 x i16>* %x, <16 x i16>* %y) {
; CHECK-LABEL: vwmul_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vle16.v v28, (a1)
; CHECK-NEXT:    vwmul.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = load <16 x i16>, <16 x i16>* %y
  %c = sext <16 x i16> %a to <16 x i32>
  %d = sext <16 x i16> %b to <16 x i32>
  %e = mul <16 x i32> %c, %d
  ret <16 x i32> %e
}

define <8 x i64> @vwmul_v8i64(<8 x i32>* %x, <8 x i32>* %y) {
; CHECK-LABEL: vwmul_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vle32.v v28, (a1)
; CHECK-NEXT:    vwmul.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = load <8 x i32>, <8 x i32>* %y
  %c = sext <8 x i32> %a to <8 x i64>
  %d = sext <8 x i32> %b to <8 x i64>
  %e = mul <8 x i64> %c, %d
  ret <8 x i64> %e
}

define <64 x i16> @vwmul_v64i16(<64 x i8>* %x, <64 x i8>* %y) {
; CHECK-LABEL: vwmul_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vwmul.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = load <64 x i8>, <64 x i8>* %y
  %c = sext <64 x i8> %a to <64 x i16>
  %d = sext <64 x i8> %b to <64 x i16>
  %e = mul <64 x i16> %c, %d
  ret <64 x i16> %e
}

define <32 x i32> @vwmul_v32i32(<32 x i16>* %x, <32 x i16>* %y) {
; CHECK-LABEL: vwmul_v32i32:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: addi a2, zero, 32
|
||||
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, mu
|
||||
; CHECK-NEXT: vle16.v v28, (a0)
|
||||
; CHECK-NEXT: vle16.v v16, (a1)
|
||||
; CHECK-NEXT: vwmul.vv v8, v28, v16
|
||||
; CHECK-NEXT: ret
|
||||
%a = load <32 x i16>, <32 x i16>* %x
|
||||
%b = load <32 x i16>, <32 x i16>* %y
|
||||
%c = sext <32 x i16> %a to <32 x i32>
|
||||
%d = sext <32 x i16> %b to <32 x i32>
|
||||
%e = mul <32 x i32> %c, %d
|
||||
ret <32 x i32> %e
|
||||
}
|
||||
|
||||
define <16 x i64> @vwmul_v16i64(<16 x i32>* %x, <16 x i32>* %y) {
|
||||
; CHECK-LABEL: vwmul_v16i64:
|
||||
; CHECK: # %bb.0:
|
||||
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
|
||||
; CHECK-NEXT: vle32.v v28, (a0)
|
||||
; CHECK-NEXT: vle32.v v16, (a1)
|
||||
; CHECK-NEXT: vwmul.vv v8, v28, v16
|
||||
; CHECK-NEXT: ret
|
||||
%a = load <16 x i32>, <16 x i32>* %x
|
||||
%b = load <16 x i32>, <16 x i32>* %y
|
||||
%c = sext <16 x i32> %a to <16 x i64>
|
||||
%d = sext <16 x i32> %b to <16 x i64>
|
||||
%e = mul <16 x i64> %c, %d
|
||||
ret <16 x i64> %e
|
||||
}
|
||||
|
||||
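
; The three cases below produce results wider than a single m8 register
; group at the minimum VLEN of 128 requested by the RUN lines (for example,
; <128 x i16> is 2048 bits), so the operands are split with vslidedown, one
; half is spilled and reloaded around the first multiply, and two vwmul.vv
; instructions compute the two halves of the result.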
define <128 x i16> @vwmul_v128i16(<128 x i8>* %x, <128 x i8>* %y) {
; CHECK-LABEL: vwmul_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    addi a2, zero, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vle8.v v24, (a1)
; CHECK-NEXT:    addi a0, zero, 64
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v16, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vslidedown.vx v0, v24, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v16, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwmul.vv v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <128 x i8>, <128 x i8>* %x
  %b = load <128 x i8>, <128 x i8>* %y
  %c = sext <128 x i8> %a to <128 x i16>
  %d = sext <128 x i8> %b to <128 x i16>
  %e = mul <128 x i16> %c, %d
  ret <128 x i16> %e
}

define <64 x i32> @vwmul_v64i32(<64 x i16>* %x, <64 x i16>* %y) {
; CHECK-LABEL: vwmul_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vle16.v v24, (a1)
; CHECK-NEXT:    addi a0, zero, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v16, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vslidedown.vx v0, v24, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v16, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwmul.vv v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <64 x i16>, <64 x i16>* %x
  %b = load <64 x i16>, <64 x i16>* %y
  %c = sext <64 x i16> %a to <64 x i32>
  %d = sext <64 x i16> %b to <64 x i32>
  %e = mul <64 x i32> %c, %d
  ret <64 x i32> %e
}

define <32 x i64> @vwmul_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
; CHECK-LABEL: vwmul_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vle32.v v24, (a1)
; CHECK-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; CHECK-NEXT:    vslidedown.vi v8, v16, 16
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vslidedown.vi v0, v24, 16
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v16, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwmul.vv v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <32 x i32>, <32 x i32>* %x
  %b = load <32 x i32>, <32 x i32>* %y
  %c = sext <32 x i32> %a to <32 x i64>
  %d = sext <32 x i32> %b to <32 x i64>
  %e = mul <32 x i64> %c, %d
  ret <32 x i64> %e
}
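
; Mixed source widths: when the two inputs need different amounts of
; widening, the checks show the narrower operand being pre-extended with
; vsext.vf2/vf4 so a single vwmul.vv still performs the final doubling.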
define <2 x i32> @vwmul_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
; CHECK-LABEL: vwmul_v2i32_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a1)
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vsext.vf2 v27, v25
; CHECK-NEXT:    vsext.vf2 v25, v26
; CHECK-NEXT:    vwmul.vv v8, v25, v27
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = load <2 x i8>, <2 x i8>* %y
  %c = sext <2 x i8> %a to <2 x i32>
  %d = sext <2 x i8> %b to <2 x i32>
  %e = mul <2 x i32> %c, %d
  ret <2 x i32> %e
}

define <4 x i32> @vwmul_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
; CHECK-LABEL: vwmul_v4i32_v4i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vsext.vf2 v27, v25
; CHECK-NEXT:    vwmul.vv v8, v27, v26
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = load <4 x i16>, <4 x i16>* %y
  %c = sext <4 x i8> %a to <4 x i32>
  %d = sext <4 x i16> %b to <4 x i32>
  %e = mul <4 x i32> %c, %d
  ret <4 x i32> %e
}

define <4 x i64> @vwmul_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) {
; CHECK-LABEL: vwmul_v4i64_v4i32_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vsext.vf4 v27, v26
; CHECK-NEXT:    vwmul.vv v8, v25, v27
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = load <4 x i8>, <4 x i8>* %y
  %c = sext <4 x i32> %a to <4 x i64>
  %d = sext <4 x i8> %b to <4 x i64>
  %e = mul <4 x i64> %c, %d
  ret <4 x i64> %e
}
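
; Splat cases: a scalar that is splatted and then sign extended folds into
; the vwmul.vx form, so no explicit splat or extend survives in the output.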
define <2 x i16> @vwmul_vx_v2i16(<2 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmul_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = insertelement <2 x i8> undef, i8 %y, i32 0
  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
  %d = sext <2 x i8> %a to <2 x i16>
  %e = sext <2 x i8> %c to <2 x i16>
  %f = mul <2 x i16> %d, %e
  ret <2 x i16> %f
}

define <4 x i16> @vwmul_vx_v4i16(<4 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmul_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = insertelement <4 x i8> undef, i8 %y, i32 0
  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
  %d = sext <4 x i8> %a to <4 x i16>
  %e = sext <4 x i8> %c to <4 x i16>
  %f = mul <4 x i16> %d, %e
  ret <4 x i16> %f
}

define <2 x i32> @vwmul_vx_v2i32(<2 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmul_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = insertelement <2 x i16> undef, i16 %y, i32 0
  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
  %d = sext <2 x i16> %a to <2 x i32>
  %e = sext <2 x i16> %c to <2 x i32>
  %f = mul <2 x i32> %d, %e
  ret <2 x i32> %f
}

define <8 x i16> @vwmul_vx_v8i16(<8 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmul_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = insertelement <8 x i8> undef, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
  %d = sext <8 x i8> %a to <8 x i16>
  %e = sext <8 x i8> %c to <8 x i16>
  %f = mul <8 x i16> %d, %e
  ret <8 x i16> %f
}

define <4 x i32> @vwmul_vx_v4i32(<4 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmul_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = insertelement <4 x i16> undef, i16 %y, i32 0
  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
  %d = sext <4 x i16> %a to <4 x i32>
  %e = sext <4 x i16> %c to <4 x i32>
  %f = mul <4 x i32> %d, %e
  ret <4 x i32> %f
}

define <2 x i64> @vwmul_vx_v2i64(<2 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmul_vx_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = insertelement <2 x i32> undef, i32 %y, i64 0
  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
  %d = sext <2 x i32> %a to <2 x i64>
  %e = sext <2 x i32> %c to <2 x i64>
  %f = mul <2 x i64> %d, %e
  ret <2 x i64> %f
}

define <16 x i16> @vwmul_vx_v16i16(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmul_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = insertelement <16 x i8> undef, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %d = sext <16 x i8> %a to <16 x i16>
  %e = sext <16 x i8> %c to <16 x i16>
  %f = mul <16 x i16> %d, %e
  ret <16 x i16> %f
}

define <8 x i32> @vwmul_vx_v8i32(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmul_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = insertelement <8 x i16> undef, i16 %y, i32 0
  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %d = sext <8 x i16> %a to <8 x i32>
  %e = sext <8 x i16> %c to <8 x i32>
  %f = mul <8 x i32> %d, %e
  ret <8 x i32> %f
}

define <4 x i64> @vwmul_vx_v4i64(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmul_vx_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vwmul.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = insertelement <4 x i32> undef, i32 %y, i64 0
  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %d = sext <4 x i32> %a to <4 x i64>
  %e = sext <4 x i32> %c to <4 x i64>
  %f = mul <4 x i64> %d, %e
  ret <4 x i64> %f
}

define <32 x i16> @vwmul_vx_v32i16(<32 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmul_vx_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vwmul.vx v8, v26, a1
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = insertelement <32 x i8> undef, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %d = sext <32 x i8> %a to <32 x i16>
  %e = sext <32 x i8> %c to <32 x i16>
  %f = mul <32 x i16> %d, %e
  ret <32 x i16> %f
}

define <16 x i32> @vwmul_vx_v16i32(<16 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmul_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vwmul.vx v8, v26, a1
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = insertelement <16 x i16> undef, i16 %y, i32 0
  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
  %d = sext <16 x i16> %a to <16 x i32>
  %e = sext <16 x i16> %c to <16 x i32>
  %f = mul <16 x i32> %d, %e
  ret <16 x i32> %f
}

define <8 x i64> @vwmul_vx_v8i64(<8 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmul_vx_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vwmul.vx v8, v26, a1
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = insertelement <8 x i32> undef, i32 %y, i64 0
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
  %d = sext <8 x i32> %a to <8 x i64>
  %e = sext <8 x i32> %c to <8 x i64>
  %f = mul <8 x i64> %d, %e
  ret <8 x i64> %f
}

define <64 x i16> @vwmul_vx_v64i16(<64 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmul_vx_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vwmul.vx v8, v28, a1
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = insertelement <64 x i8> undef, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %d = sext <64 x i8> %a to <64 x i16>
  %e = sext <64 x i8> %c to <64 x i16>
  %f = mul <64 x i16> %d, %e
  ret <64 x i16> %f
}

define <32 x i32> @vwmul_vx_v32i32(<32 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmul_vx_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vwmul.vx v8, v28, a1
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = insertelement <32 x i16> undef, i16 %y, i32 0
  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %d = sext <32 x i16> %a to <32 x i32>
  %e = sext <32 x i16> %c to <32 x i32>
  %f = mul <32 x i32> %d, %e
  ret <32 x i32> %f
}

define <16 x i64> @vwmul_vx_v16i64(<16 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmul_vx_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vwmul.vx v8, v28, a1
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = insertelement <16 x i32> undef, i32 %y, i64 0
  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %d = sext <16 x i32> %a to <16 x i64>
  %e = sext <16 x i32> %c to <16 x i64>
  %f = mul <16 x i64> %d, %e
  ret <16 x i64> %f
}

@ -0,0 +1,653 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
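
; The tests below mirror the vwmul coverage above using zero extends, which
; select the unsigned forms vwmulu.vv and vwmulu.vx instead.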
define <2 x i16> @vwmulu_v2i16(<2 x i8>* %x, <2 x i8>* %y) {
; CHECK-LABEL: vwmulu_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = load <2 x i8>, <2 x i8>* %y
  %c = zext <2 x i8> %a to <2 x i16>
  %d = zext <2 x i8> %b to <2 x i16>
  %e = mul <2 x i16> %c, %d
  ret <2 x i16> %e
}

define <4 x i16> @vwmulu_v4i16(<4 x i8>* %x, <4 x i8>* %y) {
; CHECK-LABEL: vwmulu_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = load <4 x i8>, <4 x i8>* %y
  %c = zext <4 x i8> %a to <4 x i16>
  %d = zext <4 x i8> %b to <4 x i16>
  %e = mul <4 x i16> %c, %d
  ret <4 x i16> %e
}

define <2 x i32> @vwmulu_v2i32(<2 x i16>* %x, <2 x i16>* %y) {
; CHECK-LABEL: vwmulu_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = load <2 x i16>, <2 x i16>* %y
  %c = zext <2 x i16> %a to <2 x i32>
  %d = zext <2 x i16> %b to <2 x i32>
  %e = mul <2 x i32> %c, %d
  ret <2 x i32> %e
}

define <8 x i16> @vwmulu_v8i16(<8 x i8>* %x, <8 x i8>* %y) {
; CHECK-LABEL: vwmulu_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = load <8 x i8>, <8 x i8>* %y
  %c = zext <8 x i8> %a to <8 x i16>
  %d = zext <8 x i8> %b to <8 x i16>
  %e = mul <8 x i16> %c, %d
  ret <8 x i16> %e
}

define <4 x i32> @vwmulu_v4i32(<4 x i16>* %x, <4 x i16>* %y) {
; CHECK-LABEL: vwmulu_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = load <4 x i16>, <4 x i16>* %y
  %c = zext <4 x i16> %a to <4 x i32>
  %d = zext <4 x i16> %b to <4 x i32>
  %e = mul <4 x i32> %c, %d
  ret <4 x i32> %e
}

define <2 x i64> @vwmulu_v2i64(<2 x i32>* %x, <2 x i32>* %y) {
; CHECK-LABEL: vwmulu_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = load <2 x i32>, <2 x i32>* %y
  %c = zext <2 x i32> %a to <2 x i64>
  %d = zext <2 x i32> %b to <2 x i64>
  %e = mul <2 x i64> %c, %d
  ret <2 x i64> %e
}

define <16 x i16> @vwmulu_v16i16(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: vwmulu_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = load <16 x i8>, <16 x i8>* %y
  %c = zext <16 x i8> %a to <16 x i16>
  %d = zext <16 x i8> %b to <16 x i16>
  %e = mul <16 x i16> %c, %d
  ret <16 x i16> %e
}

define <8 x i32> @vwmulu_v8i32(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: vwmulu_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = load <8 x i16>, <8 x i16>* %y
  %c = zext <8 x i16> %a to <8 x i32>
  %d = zext <8 x i16> %b to <8 x i32>
  %e = mul <8 x i32> %c, %d
  ret <8 x i32> %e
}

define <4 x i64> @vwmulu_v4i64(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: vwmulu_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vle32.v v26, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v25, v26
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = load <4 x i32>, <4 x i32>* %y
  %c = zext <4 x i32> %a to <4 x i64>
  %d = zext <4 x i32> %b to <4 x i64>
  %e = mul <4 x i64> %c, %d
  ret <4 x i64> %e
}

define <32 x i16> @vwmulu_v32i16(<32 x i8>* %x, <32 x i8>* %y) {
; CHECK-LABEL: vwmulu_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vle8.v v28, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = load <32 x i8>, <32 x i8>* %y
  %c = zext <32 x i8> %a to <32 x i16>
  %d = zext <32 x i8> %b to <32 x i16>
  %e = mul <32 x i16> %c, %d
  ret <32 x i16> %e
}

define <16 x i32> @vwmulu_v16i32(<16 x i16>* %x, <16 x i16>* %y) {
; CHECK-LABEL: vwmulu_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vle16.v v28, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = load <16 x i16>, <16 x i16>* %y
  %c = zext <16 x i16> %a to <16 x i32>
  %d = zext <16 x i16> %b to <16 x i32>
  %e = mul <16 x i32> %c, %d
  ret <16 x i32> %e
}

define <8 x i64> @vwmulu_v8i64(<8 x i32>* %x, <8 x i32>* %y) {
; CHECK-LABEL: vwmulu_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vle32.v v28, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v26, v28
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = load <8 x i32>, <8 x i32>* %y
  %c = zext <8 x i32> %a to <8 x i64>
  %d = zext <8 x i32> %b to <8 x i64>
  %e = mul <8 x i64> %c, %d
  ret <8 x i64> %e
}

define <64 x i16> @vwmulu_v64i16(<64 x i8>* %x, <64 x i8>* %y) {
; CHECK-LABEL: vwmulu_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vle8.v v16, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = load <64 x i8>, <64 x i8>* %y
  %c = zext <64 x i8> %a to <64 x i16>
  %d = zext <64 x i8> %b to <64 x i16>
  %e = mul <64 x i16> %c, %d
  ret <64 x i16> %e
}

define <32 x i32> @vwmulu_v32i32(<32 x i16>* %x, <32 x i16>* %y) {
; CHECK-LABEL: vwmulu_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vle16.v v16, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = load <32 x i16>, <32 x i16>* %y
  %c = zext <32 x i16> %a to <32 x i32>
  %d = zext <32 x i16> %b to <32 x i32>
  %e = mul <32 x i32> %c, %d
  ret <32 x i32> %e
}

define <16 x i64> @vwmulu_v16i64(<16 x i32>* %x, <16 x i32>* %y) {
; CHECK-LABEL: vwmulu_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vle32.v v16, (a1)
; CHECK-NEXT:    vwmulu.vv v8, v28, v16
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = load <16 x i32>, <16 x i32>* %y
  %c = zext <16 x i32> %a to <16 x i64>
  %d = zext <16 x i32> %b to <16 x i64>
  %e = mul <16 x i64> %c, %d
  ret <16 x i64> %e
}
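
; As with vwmul, results wider than one m8 register group at VLEN=128 are
; split: the checks show vslidedown, a spill and reload of one half, and two
; vwmulu.vv instructions computing the two result halves.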
define <128 x i16> @vwmulu_v128i16(<128 x i8>* %x, <128 x i8>* %y) {
; CHECK-LABEL: vwmulu_v128i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    addi a2, zero, 128
; CHECK-NEXT:    vsetvli zero, a2, e8, m8, ta, mu
; CHECK-NEXT:    vle8.v v16, (a0)
; CHECK-NEXT:    vle8.v v24, (a1)
; CHECK-NEXT:    addi a0, zero, 64
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v16, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vslidedown.vx v0, v24, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwmulu.vv v8, v16, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwmulu.vv v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <128 x i8>, <128 x i8>* %x
  %b = load <128 x i8>, <128 x i8>* %y
  %c = zext <128 x i8> %a to <128 x i16>
  %d = zext <128 x i8> %b to <128 x i16>
  %e = mul <128 x i16> %c, %d
  ret <128 x i16> %e
}

define <64 x i32> @vwmulu_v64i32(<64 x i16>* %x, <64 x i16>* %y) {
; CHECK-LABEL: vwmulu_v64i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e16, m8, ta, mu
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vle16.v v24, (a1)
; CHECK-NEXT:    addi a0, zero, 32
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vslidedown.vx v8, v16, a0
; CHECK-NEXT:    addi a1, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT:    vslidedown.vx v0, v24, a0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwmulu.vv v8, v16, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwmulu.vv v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <64 x i16>, <64 x i16>* %x
  %b = load <64 x i16>, <64 x i16>* %y
  %c = zext <64 x i16> %a to <64 x i32>
  %d = zext <64 x i16> %b to <64 x i32>
  %e = mul <64 x i32> %c, %d
  ret <64 x i32> %e
}

define <32 x i64> @vwmulu_v32i64(<32 x i32>* %x, <32 x i32>* %y) {
; CHECK-LABEL: vwmulu_v32i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a2, vlenb
; CHECK-NEXT:    slli a2, a2, 3
; CHECK-NEXT:    sub sp, sp, a2
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m8, ta, mu
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vle32.v v24, (a1)
; CHECK-NEXT:    vsetivli zero, 16, e32, m8, ta, mu
; CHECK-NEXT:    vslidedown.vi v8, v16, 16
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vslidedown.vi v0, v24, 16
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vwmulu.vv v8, v16, v24
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vl8re8.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vwmulu.vv v16, v24, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
  %a = load <32 x i32>, <32 x i32>* %x
  %b = load <32 x i32>, <32 x i32>* %y
  %c = zext <32 x i32> %a to <32 x i64>
  %d = zext <32 x i32> %b to <32 x i64>
  %e = mul <32 x i64> %c, %d
  ret <32 x i64> %e
}
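
; Mixed source widths with zero extends: the narrower operand is pre-widened
; with vzext.vf2/vf4 and the final doubling is left to vwmulu.vv.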
define <2 x i32> @vwmulu_v2i32_v2i8(<2 x i8>* %x, <2 x i8>* %y) {
; CHECK-LABEL: vwmulu_v2i32_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a1)
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vzext.vf2 v27, v25
; CHECK-NEXT:    vzext.vf2 v25, v26
; CHECK-NEXT:    vwmulu.vv v8, v25, v27
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = load <2 x i8>, <2 x i8>* %y
  %c = zext <2 x i8> %a to <2 x i32>
  %d = zext <2 x i8> %b to <2 x i32>
  %e = mul <2 x i32> %c, %d
  ret <2 x i32> %e
}

define <4 x i32> @vwmulu_v4i32_v4i8_v4i16(<4 x i8>* %x, <4 x i16>* %y) {
; CHECK-LABEL: vwmulu_v4i32_v4i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a1)
; CHECK-NEXT:    vzext.vf2 v27, v25
; CHECK-NEXT:    vwmulu.vv v8, v27, v26
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = load <4 x i16>, <4 x i16>* %y
  %c = zext <4 x i8> %a to <4 x i32>
  %d = zext <4 x i16> %b to <4 x i32>
  %e = mul <4 x i32> %c, %d
  ret <4 x i32> %e
}

define <4 x i64> @vwmulu_v4i64_v4i32_v4i8(<4 x i32>* %x, <4 x i8>* %y) {
; CHECK-LABEL: vwmulu_v4i64_v4i32_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v26, (a1)
; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v27, v26
; CHECK-NEXT:    vwmulu.vv v8, v25, v27
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = load <4 x i8>, <4 x i8>* %y
  %c = zext <4 x i32> %a to <4 x i64>
  %d = zext <4 x i8> %b to <4 x i64>
  %e = mul <4 x i64> %c, %d
  ret <4 x i64> %e
}
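
; Splat cases for the unsigned form: a zero-extended scalar splat folds into
; vwmulu.vx.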
define <2 x i16> @vwmulu_vx_v2i16(<2 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmulu_vx_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <2 x i8>, <2 x i8>* %x
  %b = insertelement <2 x i8> undef, i8 %y, i32 0
  %c = shufflevector <2 x i8> %b, <2 x i8> undef, <2 x i32> zeroinitializer
  %d = zext <2 x i8> %a to <2 x i16>
  %e = zext <2 x i8> %c to <2 x i16>
  %f = mul <2 x i16> %d, %e
  ret <2 x i16> %f
}

define <4 x i16> @vwmulu_vx_v4i16(<4 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmulu_vx_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <4 x i8>, <4 x i8>* %x
  %b = insertelement <4 x i8> undef, i8 %y, i32 0
  %c = shufflevector <4 x i8> %b, <4 x i8> undef, <4 x i32> zeroinitializer
  %d = zext <4 x i8> %a to <4 x i16>
  %e = zext <4 x i8> %c to <4 x i16>
  %f = mul <4 x i16> %d, %e
  ret <4 x i16> %f
}

define <2 x i32> @vwmulu_vx_v2i32(<2 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmulu_vx_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <2 x i16>, <2 x i16>* %x
  %b = insertelement <2 x i16> undef, i16 %y, i32 0
  %c = shufflevector <2 x i16> %b, <2 x i16> undef, <2 x i32> zeroinitializer
  %d = zext <2 x i16> %a to <2 x i32>
  %e = zext <2 x i16> %c to <2 x i32>
  %f = mul <2 x i32> %d, %e
  ret <2 x i32> %f
}

define <8 x i16> @vwmulu_vx_v8i16(<8 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmulu_vx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <8 x i8>, <8 x i8>* %x
  %b = insertelement <8 x i8> undef, i8 %y, i32 0
  %c = shufflevector <8 x i8> %b, <8 x i8> undef, <8 x i32> zeroinitializer
  %d = zext <8 x i8> %a to <8 x i16>
  %e = zext <8 x i8> %c to <8 x i16>
  %f = mul <8 x i16> %d, %e
  ret <8 x i16> %f
}

define <4 x i32> @vwmulu_vx_v4i32(<4 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmulu_vx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <4 x i16>, <4 x i16>* %x
  %b = insertelement <4 x i16> undef, i16 %y, i32 0
  %c = shufflevector <4 x i16> %b, <4 x i16> undef, <4 x i32> zeroinitializer
  %d = zext <4 x i16> %a to <4 x i32>
  %e = zext <4 x i16> %c to <4 x i32>
  %f = mul <4 x i32> %d, %e
  ret <4 x i32> %f
}

define <2 x i64> @vwmulu_vx_v2i64(<2 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmulu_vx_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <2 x i32>, <2 x i32>* %x
  %b = insertelement <2 x i32> undef, i32 %y, i64 0
  %c = shufflevector <2 x i32> %b, <2 x i32> undef, <2 x i32> zeroinitializer
  %d = zext <2 x i32> %a to <2 x i64>
  %e = zext <2 x i32> %c to <2 x i64>
  %f = mul <2 x i64> %d, %e
  ret <2 x i64> %f
}

define <16 x i16> @vwmulu_vx_v16i16(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmulu_vx_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
; CHECK-NEXT:    vle8.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <16 x i8>, <16 x i8>* %x
  %b = insertelement <16 x i8> undef, i8 %y, i32 0
  %c = shufflevector <16 x i8> %b, <16 x i8> undef, <16 x i32> zeroinitializer
  %d = zext <16 x i8> %a to <16 x i16>
  %e = zext <16 x i8> %c to <16 x i16>
  %f = mul <16 x i16> %d, %e
  ret <16 x i16> %f
}

define <8 x i32> @vwmulu_vx_v8i32(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmulu_vx_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <8 x i16>, <8 x i16>* %x
  %b = insertelement <8 x i16> undef, i16 %y, i32 0
  %c = shufflevector <8 x i16> %b, <8 x i16> undef, <8 x i32> zeroinitializer
  %d = zext <8 x i16> %a to <8 x i32>
  %e = zext <8 x i16> %c to <8 x i32>
  %f = mul <8 x i32> %d, %e
  ret <8 x i32> %f
}

define <4 x i64> @vwmulu_vx_v4i64(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmulu_vx_v4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v25, a1
; CHECK-NEXT:    ret
  %a = load <4 x i32>, <4 x i32>* %x
  %b = insertelement <4 x i32> undef, i32 %y, i64 0
  %c = shufflevector <4 x i32> %b, <4 x i32> undef, <4 x i32> zeroinitializer
  %d = zext <4 x i32> %a to <4 x i64>
  %e = zext <4 x i32> %c to <4 x i64>
  %f = mul <4 x i64> %d, %e
  ret <4 x i64> %f
}

define <32 x i16> @vwmulu_vx_v32i16(<32 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmulu_vx_v32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
; CHECK-NEXT:    vle8.v v26, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v26, a1
; CHECK-NEXT:    ret
  %a = load <32 x i8>, <32 x i8>* %x
  %b = insertelement <32 x i8> undef, i8 %y, i32 0
  %c = shufflevector <32 x i8> %b, <32 x i8> undef, <32 x i32> zeroinitializer
  %d = zext <32 x i8> %a to <32 x i16>
  %e = zext <32 x i8> %c to <32 x i16>
  %f = mul <32 x i16> %d, %e
  ret <32 x i16> %f
}

define <16 x i32> @vwmulu_vx_v16i32(<16 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmulu_vx_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v26, a1
; CHECK-NEXT:    ret
  %a = load <16 x i16>, <16 x i16>* %x
  %b = insertelement <16 x i16> undef, i16 %y, i32 0
  %c = shufflevector <16 x i16> %b, <16 x i16> undef, <16 x i32> zeroinitializer
  %d = zext <16 x i16> %a to <16 x i32>
  %e = zext <16 x i16> %c to <16 x i32>
  %f = mul <16 x i32> %d, %e
  ret <16 x i32> %f
}

define <8 x i64> @vwmulu_vx_v8i64(<8 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmulu_vx_v8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v26, a1
; CHECK-NEXT:    ret
  %a = load <8 x i32>, <8 x i32>* %x
  %b = insertelement <8 x i32> undef, i32 %y, i64 0
  %c = shufflevector <8 x i32> %b, <8 x i32> undef, <8 x i32> zeroinitializer
  %d = zext <8 x i32> %a to <8 x i64>
  %e = zext <8 x i32> %c to <8 x i64>
  %f = mul <8 x i64> %d, %e
  ret <8 x i64> %f
}

define <64 x i16> @vwmulu_vx_v64i16(<64 x i8>* %x, i8 %y) {
; CHECK-LABEL: vwmulu_vx_v64i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, mu
; CHECK-NEXT:    vle8.v v28, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v28, a1
; CHECK-NEXT:    ret
  %a = load <64 x i8>, <64 x i8>* %x
  %b = insertelement <64 x i8> undef, i8 %y, i32 0
  %c = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
  %d = zext <64 x i8> %a to <64 x i16>
  %e = zext <64 x i8> %c to <64 x i16>
  %f = mul <64 x i16> %d, %e
  ret <64 x i16> %f
}

define <32 x i32> @vwmulu_vx_v32i32(<32 x i16>* %x, i16 %y) {
; CHECK-LABEL: vwmulu_vx_v32i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v28, a1
; CHECK-NEXT:    ret
  %a = load <32 x i16>, <32 x i16>* %x
  %b = insertelement <32 x i16> undef, i16 %y, i32 0
  %c = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
  %d = zext <32 x i16> %a to <32 x i32>
  %e = zext <32 x i16> %c to <32 x i32>
  %f = mul <32 x i32> %d, %e
  ret <32 x i32> %f
}

define <16 x i64> @vwmulu_vx_v16i64(<16 x i32>* %x, i32 %y) {
; CHECK-LABEL: vwmulu_vx_v16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vwmulu.vx v8, v28, a1
; CHECK-NEXT:    ret
  %a = load <16 x i32>, <16 x i32>* %x
  %b = insertelement <16 x i32> undef, i32 %y, i64 0
  %c = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
  %d = zext <16 x i32> %a to <16 x i64>
  %e = zext <16 x i32> %c to <16 x i64>
  %f = mul <16 x i64> %d, %e
  ret <16 x i64> %f
}