[RISCV] Add support for fixed vector reductions.
I've included tests that require type legalization to split the vector. The i64 versions of these scalarize on RV32 because type legalization visits the result before the vector type, so we have to abort our custom expansion to avoid creating target-specific nodes with an illegal type; type legalization then ends up scalarizing. We might be able to fix this by doing custom splitting for large vectors in our handler to get down to a legal type.

Reviewed By: frasercrmck

Differential Revision: https://reviews.llvm.org/D98102
parent 736afe465f
commit 77ac3166e5
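For context, the change makes fixed-length reduction intrinsics such as the following lower to single RVV reduction instructions instead of being expanded shuffle-by-shuffle (a minimal illustration, not taken from the diff):

    declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)

    define i32 @reduce_add_v4i32(<4 x i32>* %x) {
      %v = load <4 x i32>, <4 x i32>* %x
      %r = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %v)
      ret i32 %r
    }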
@@ -595,6 +595,17 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);
+
+       // Custom-lower reduction operations to set up the corresponding custom
+       // nodes' operands.
+       setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
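Marking these operations Custom routes them through the target's LowerOperation hook. That dispatch is not part of the hunks shown in this excerpt, but it amounts to roughly the following sketch (the case grouping is an assumption; the handler names match the functions added below):

    // Sketch (assumed, not shown in this excerpt): LowerOperation forwards the
    // newly Custom-marked reductions to the handlers added in this patch.
    case ISD::VECREDUCE_ADD:
    case ISD::VECREDUCE_UMAX:
    case ISD::VECREDUCE_SMAX:
    case ISD::VECREDUCE_UMIN:
    case ISD::VECREDUCE_SMIN:
    case ISD::VECREDUCE_AND:
    case ISD::VECREDUCE_OR:
    case ISD::VECREDUCE_XOR:
      return lowerVECREDUCE(Op, DAG);
    case ISD::VECREDUCE_FADD:
    case ISD::VECREDUCE_SEQ_FADD:
      return lowerFPVECREDUCE(Op, DAG);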
@@ -638,6 +649,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
        setOperationAction(ISD::VSELECT, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);
+
+       setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
+       setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      }
    }
  }
@@ -2399,21 +2413,21 @@ static unsigned getRVVReductionOp(unsigned ISDOpcode) {
  default:
    llvm_unreachable("Unhandled reduction");
  case ISD::VECREDUCE_ADD:
-   return RISCVISD::VECREDUCE_ADD;
+   return RISCVISD::VECREDUCE_ADD_VL;
  case ISD::VECREDUCE_UMAX:
-   return RISCVISD::VECREDUCE_UMAX;
+   return RISCVISD::VECREDUCE_UMAX_VL;
  case ISD::VECREDUCE_SMAX:
-   return RISCVISD::VECREDUCE_SMAX;
+   return RISCVISD::VECREDUCE_SMAX_VL;
  case ISD::VECREDUCE_UMIN:
-   return RISCVISD::VECREDUCE_UMIN;
+   return RISCVISD::VECREDUCE_UMIN_VL;
  case ISD::VECREDUCE_SMIN:
-   return RISCVISD::VECREDUCE_SMIN;
+   return RISCVISD::VECREDUCE_SMIN_VL;
  case ISD::VECREDUCE_AND:
-   return RISCVISD::VECREDUCE_AND;
+   return RISCVISD::VECREDUCE_AND_VL;
  case ISD::VECREDUCE_OR:
-   return RISCVISD::VECREDUCE_OR;
+   return RISCVISD::VECREDUCE_OR_VL;
  case ISD::VECREDUCE_XOR:
-   return RISCVISD::VECREDUCE_XOR;
+   return RISCVISD::VECREDUCE_XOR_VL;
  }
}
@@ -2423,18 +2437,35 @@ static unsigned getRVVReductionOp(unsigned ISDOpcode) {
SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc DL(Op);
  assert(Op.getValueType().isSimple() &&
         Op.getOperand(0).getValueType().isSimple() &&
         "Unexpected vector-reduce lowering");
  MVT VecVT = Op.getOperand(0).getSimpleValueType();
  MVT VecEltVT = VecVT.getVectorElementType();
+
+ // Avoid creating vectors with illegal type.
+ if (!isTypeLegal(VecVT))
+   return SDValue();

  unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
- MVT M1VT = getLMUL1VT(VecVT);
+
+ SDValue Vec = Op.getOperand(0);
+
+ MVT ContainerVT = VecVT;
+ if (VecVT.isFixedLengthVector()) {
+   ContainerVT = getContainerForFixedLengthVector(VecVT);
+   Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
+ }
+
+ MVT M1VT = getLMUL1VT(ContainerVT);
+
+ SDValue Mask, VL;
+ std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);

+ // FIXME: This is a VLMAX splat which might be too large and can prevent
+ // vsetvli removal.
  SDValue NeutralElem = DAG.getNeutralElement(
      ISD::getVecReduceBaseOpcode(Op.getOpcode()), DL, VecEltVT, SDNodeFlags());
  SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
  SDValue Reduction =
-     DAG.getNode(RVVOpcode, DL, M1VT, Op.getOperand(0), IdentitySplat);
+     DAG.getNode(RVVOpcode, DL, M1VT, Vec, IdentitySplat, Mask, VL);
  SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
                             DAG.getConstant(0, DL, Subtarget.getXLenVT()));
  return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
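For orientation, the code this lowering produces for a small fixed-length integer reduction looks roughly like the following (an illustrative sketch assuming -riscv-v-vector-bits-min=128; compare the floating-point sequences in the test file further down):

    # Hypothetical llc output for i32 @llvm.vector.reduce.add.v4i32 of a loaded <4 x i32>:
    vsetivli a1, 4, e32,m1,ta,mu    # VL=4 for the fixed-length load
    vle32.v v25, (a0)               # load the vector
    vsetvli a1, zero, e32,m1,ta,mu  # VLMAX for the identity splat (see the FIXME above)
    vmv.v.i v26, 0                  # splat the neutral element (0 for add)
    vsetivli a1, 4, e32,m1,ta,mu    # back to VL=4 for the reduction itself
    vredsum.vs v25, v25, v26        # reduce into element 0
    vmv.x.s a0, v25                 # extract element 0 as the scalar result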
@@ -2450,10 +2481,10 @@ getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
  default:
    llvm_unreachable("Unhandled reduction");
  case ISD::VECREDUCE_FADD:
-   return std::make_tuple(RISCVISD::VECREDUCE_FADD, Op.getOperand(0),
+   return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
                           DAG.getConstantFP(0.0, DL, EltVT));
  case ISD::VECREDUCE_SEQ_FADD:
-   return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD, Op.getOperand(1),
+   return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
                           Op.getOperand(0));
  }
}
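The two cases differ in which operand carries the scalar start value: an unordered (reassoc) fadd reduction starts the vector reduction from the neutral 0.0 and folds the start value in with a scalar fadd afterwards, while an ordered reduction feeds the start value directly into the accumulator of vfredosum. A minimal IR illustration (the test functions below exercise exactly this pair):

    %r1 = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v) ; vfredsum, then scalar fadd with %s
    %r2 = call float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v)         ; vfredosum with %s splat as the start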
@@ -2467,10 +2498,24 @@ SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
  SDValue VectorVal, ScalarVal;
  std::tie(RVVOpcode, VectorVal, ScalarVal) =
      getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
+ MVT VecVT = VectorVal.getSimpleValueType();
+
+ MVT ContainerVT = VecVT;
+ if (VecVT.isFixedLengthVector()) {
+   ContainerVT = getContainerForFixedLengthVector(VecVT);
+   VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
+ }

  MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());

+ SDValue Mask, VL;
+ std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
+
+ // FIXME: This is a VLMAX splat which might be too large and can prevent
+ // vsetvli removal.
  SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
- SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat);
+ SDValue Reduction =
+     DAG.getNode(RVVOpcode, DL, M1VT, VectorVal, ScalarSplat, Mask, VL);
  return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
                     DAG.getConstant(0, DL, Subtarget.getXLenVT()));
}
@@ -3339,7 +3384,8 @@ void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
    // The custom-lowering for these nodes returns a vector whose first element
    // is the result of the reduction. Extract its first element and let the
    // legalization for EXTRACT_VECTOR_ELT do the rest of the job.
-   Results.push_back(lowerVECREDUCE(SDValue(N, 0), DAG));
+   if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
+     Results.push_back(V);
    break;
  }
}
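This guard is what the commit message describes: on RV32, an i64-element reduction reaches ReplaceNodeResults before the vector operand type is legal, lowerVECREDUCE bails out via its isTypeLegal check, and type legalization falls back to scalarizing. A minimal trigger, as an illustration (not taken from the diff):

    ; On riscv32 the i64 result is visited before the <2 x i64> operand is
    ; legalized, so the custom expansion returns SDValue() and the reduction
    ; is scalarized.
    declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)

    define i64 @reduce_add_v2i64(<2 x i64> %v) {
      %r = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> %v)
      ret i64 %r
    }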
@@ -5810,16 +5856,16 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  NODE_NAME_CASE(VSLIDEDOWN_VL)
  NODE_NAME_CASE(VID_VL)
  NODE_NAME_CASE(VFNCVT_ROD_VL)
- NODE_NAME_CASE(VECREDUCE_ADD)
- NODE_NAME_CASE(VECREDUCE_UMAX)
- NODE_NAME_CASE(VECREDUCE_SMAX)
- NODE_NAME_CASE(VECREDUCE_UMIN)
- NODE_NAME_CASE(VECREDUCE_SMIN)
- NODE_NAME_CASE(VECREDUCE_AND)
- NODE_NAME_CASE(VECREDUCE_OR)
- NODE_NAME_CASE(VECREDUCE_XOR)
- NODE_NAME_CASE(VECREDUCE_FADD)
- NODE_NAME_CASE(VECREDUCE_SEQ_FADD)
+ NODE_NAME_CASE(VECREDUCE_ADD_VL)
+ NODE_NAME_CASE(VECREDUCE_UMAX_VL)
+ NODE_NAME_CASE(VECREDUCE_SMAX_VL)
+ NODE_NAME_CASE(VECREDUCE_UMIN_VL)
+ NODE_NAME_CASE(VECREDUCE_SMIN_VL)
+ NODE_NAME_CASE(VECREDUCE_AND_VL)
+ NODE_NAME_CASE(VECREDUCE_OR_VL)
+ NODE_NAME_CASE(VECREDUCE_XOR_VL)
+ NODE_NAME_CASE(VECREDUCE_FADD_VL)
+ NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
  NODE_NAME_CASE(ADD_VL)
  NODE_NAME_CASE(AND_VL)
  NODE_NAME_CASE(MUL_VL)
@@ -138,16 +138,17 @@ enum NodeType : unsigned {
  // nxv2i32 = vecreduce_add nxv8i32, nxv2i32
  // The difference in types does introduce extra vsetvli instructions but
  // similarly it reduces the number of registers consumed per reduction.
- VECREDUCE_ADD,
- VECREDUCE_UMAX,
- VECREDUCE_SMAX,
- VECREDUCE_UMIN,
- VECREDUCE_SMIN,
- VECREDUCE_AND,
- VECREDUCE_OR,
- VECREDUCE_XOR,
- VECREDUCE_FADD,
- VECREDUCE_SEQ_FADD,
+ // Also has a mask and VL operand.
+ VECREDUCE_ADD_VL,
+ VECREDUCE_UMAX_VL,
+ VECREDUCE_SMAX_VL,
+ VECREDUCE_UMIN_VL,
+ VECREDUCE_SMIN_VL,
+ VECREDUCE_AND_VL,
+ VECREDUCE_OR_VL,
+ VECREDUCE_XOR_VL,
+ VECREDUCE_FADD_VL,
+ VECREDUCE_SEQ_FADD_VL,

  // Vector binary and unary ops with a mask as a third operand, and VL as a
  // fourth operand.
@@ -37,14 +37,6 @@ class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
  dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
}

-def SDTRVVVecReduce : SDTypeProfile<1, 2, [
-  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>
-]>;
-
-foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
-                "FADD", "SEQ_FADD"] in
-  def rvv_vecreduce_#kind : SDNode<"RISCVISD::VECREDUCE_"#kind, SDTRVVVecReduce>;
-
multiclass VPatUSLoadStoreSDNode<ValueType type,
                                 int sew,
                                 LMULInfo vlmul,
@@ -357,18 +349,6 @@ multiclass VPatNConvertFP2ISDNode_V<SDNode vop, string instruction_name> {
  }
}

-multiclass VPatReductionSDNode<SDNode vop, string instruction_name, bit is_float> {
-  foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
-    defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
-    def: Pat<(vti_m1.Vector (vop (vti.Vector vti.RegClass:$rs1), VR:$rs2)),
-             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
-                 (vti_m1.Vector (IMPLICIT_DEF)),
-                 (vti.Vector vti.RegClass:$rs1),
-                 (vti_m1.Vector VR:$rs2),
-                 vti.AVL, vti.SEW)>;
-  }
-}
-
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -487,20 +467,6 @@ foreach vti = AllIntegerVectors in {
               vti.RegClass:$rs2, simm5:$rs1, VMV0:$vm, vti.AVL, vti.SEW)>;
}

-// 15.1. Vector Single-Width Integer Reduction Instructions
-defm "" : VPatReductionSDNode<rvv_vecreduce_ADD, "PseudoVREDSUM", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_UMAX, "PseudoVREDMAXU", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_SMAX, "PseudoVREDMAX", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_UMIN, "PseudoVREDMINU", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_SMIN, "PseudoVREDMIN", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_AND, "PseudoVREDAND", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_OR, "PseudoVREDOR", /*is_float*/0>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_XOR, "PseudoVREDXOR", /*is_float*/0>;
-
-// 15.3. Vector Single-Width Floating-Point Reduction Instructions
-defm "" : VPatReductionSDNode<rvv_vecreduce_SEQ_FADD, "PseudoVFREDOSUM", /*is_float*/1>;
-defm "" : VPatReductionSDNode<rvv_vecreduce_FADD, "PseudoVFREDSUM", /*is_float*/1>;
-
// 16.1. Vector Mask-Register Logical Instructions
foreach mti = AllMasks in {
  def : Pat<(mti.Mask (and VR:$rs1, VR:$rs2)),
@@ -185,6 +185,15 @@ def riscv_trunc_vector_vl : SDNode<"RISCVISD::TRUNCATE_VECTOR_VL",
                                                SDTCVecEltisVT<2, i1>,
                                                SDTCisVT<3, XLenVT>]>>;
+
+def SDTRVVVecReduce : SDTypeProfile<1, 4, [
+  SDTCisVec<0>, SDTCisVec<1>, SDTCisSameAs<0, 2>, SDTCVecEltisVT<3, i1>,
+  SDTCisSameNumEltsAs<1, 3>, SDTCisVT<4, XLenVT>
+]>;
+
+foreach kind = ["ADD", "UMAX", "SMAX", "UMIN", "SMIN", "AND", "OR", "XOR",
+                "FADD", "SEQ_FADD"] in
+  def rvv_vecreduce_#kind#_vl : SDNode<"RISCVISD::VECREDUCE_"#kind#"_VL", SDTRVVVecReduce>;
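The profile reads: one result and four operands — the source vector (operand 1), a start-value vector with the same type as the result (SDTCisSameAs<0, 2>), an i1 mask with the source's element count, and the VL in XLenVT. Each foreach iteration defines one SDNode; written out, the first expansion is simply:

    def rvv_vecreduce_ADD_vl : SDNode<"RISCVISD::VECREDUCE_ADD_VL", SDTRVVVecReduce>;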

// Ignore the vl operand.
def SplatFPOp : PatFrag<(ops node:$op),
                        (riscv_vfmv_v_f_vl node:$op, srcvalue)>;
@@ -479,6 +488,20 @@ multiclass VPatNConvertI2FPSDNode_V_VL<SDNode vop, string instruction_name> {
  }
}

+multiclass VPatReductionVL<SDNode vop, string instruction_name, bit is_float> {
+  foreach vti = !if(is_float, AllFloatVectors, AllIntegerVectors) in {
+    defvar vti_m1 = !cast<VTypeInfo>(!if(is_float, "VF", "VI") # vti.SEW # "M1");
+    def: Pat<(vti_m1.Vector (vop (vti.Vector vti.RegClass:$rs1), VR:$rs2,
+                                 (vti.Mask true_mask),
+                                 (XLenVT (VLOp GPR:$vl)))),
+             (!cast<Instruction>(instruction_name#"_VS_"#vti.LMul.MX)
+                 (vti_m1.Vector (IMPLICIT_DEF)),
+                 (vti.Vector vti.RegClass:$rs1),
+                 (vti_m1.Vector VR:$rs2),
+                 GPR:$vl, vti.SEW)>;
+  }
+}
+
//===----------------------------------------------------------------------===//
// Patterns.
//===----------------------------------------------------------------------===//
@@ -645,6 +668,24 @@ foreach vti = AllIntegerVectors in {

} // Predicates = [HasStdExtV]

+// 15.1. Vector Single-Width Integer Reduction Instructions
+let Predicates = [HasStdExtV] in {
+defm "" : VPatReductionVL<rvv_vecreduce_ADD_vl, "PseudoVREDSUM", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_UMAX_vl, "PseudoVREDMAXU", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_SMAX_vl, "PseudoVREDMAX", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_UMIN_vl, "PseudoVREDMINU", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_SMIN_vl, "PseudoVREDMIN", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_AND_vl, "PseudoVREDAND", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_OR_vl, "PseudoVREDOR", /*is_float*/0>;
+defm "" : VPatReductionVL<rvv_vecreduce_XOR_vl, "PseudoVREDXOR", /*is_float*/0>;
+} // Predicates = [HasStdExtV]
+
+// 15.3. Vector Single-Width Floating-Point Reduction Instructions
+let Predicates = [HasStdExtV, HasStdExtF] in {
+defm "" : VPatReductionVL<rvv_vecreduce_SEQ_FADD_vl, "PseudoVFREDOSUM", /*is_float*/1>;
+defm "" : VPatReductionVL<rvv_vecreduce_FADD_vl, "PseudoVFREDSUM", /*is_float*/1>;
+} // Predicates = [HasStdExtV, HasStdExtF]
+
let Predicates = [HasStdExtV, HasStdExtF] in {

// 14.2. Vector Single-Width Floating-Point Add/Subtract Instructions
@@ -0,0 +1,786 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+experimental-v,+experimental-zfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK

declare half @llvm.vector.reduce.fadd.v1f16(half, <1 x half>)

define half @vreduce_fadd_v1f16(<1 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    flh ft0, 0(a0)
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <1 x half>, <1 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v1f16(<1 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    flh ft0, 0(a0)
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <1 x half>, <1 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v1f16(half %s, <1 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v2f16(half, <2 x half>)

define half @vreduce_fadd_v2f16(<2 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vsetivli a0, 2, e16,m1,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <2 x half>, <2 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v2f16(<2 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 2, e16,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <2 x half>, <2 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v2f16(half %s, <2 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v4f16(half, <4 x half>)

define half @vreduce_fadd_v4f16(<4 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vsetivli a0, 4, e16,m1,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <4 x half>, <4 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v4f16(<4 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 4, e16,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <4 x half>, <4 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v4f16(half %s, <4 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v8f16(half, <8 x half>)

define half @vreduce_fadd_v8f16(<8 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vsetivli a0, 8, e16,m1,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <8 x half>, <8 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v8f16(<8 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT:    vle16.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 8, e16,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <8 x half>, <8 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v8f16(half %s, <8 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v16f16(half, <16 x half>)

define half @vreduce_fadd_v16f16(<16 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 16, e16,m2,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v26, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <16 x half>, <16 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v16f16(<16 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v16f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e16,m2,ta,mu
; CHECK-NEXT:    vle16.v v26, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 16, e16,m2,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v26, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <16 x half>, <16 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v16f16(half %s, <16 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v32f16(half, <32 x half>)

define half @vreduce_fadd_v32f16(<32 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 32
; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v28, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <32 x half>, <32 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v32f16(<32 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 32
; CHECK-NEXT:    vsetvli a2, a1, e16,m4,ta,mu
; CHECK-NEXT:    vle16.v v28, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetvli a0, a1, e16,m4,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v28, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <32 x half>, <32 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v32f16(half %s, <32 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v64f16(half, <64 x half>)

define half @vreduce_fadd_v64f16(<64 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v64f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 64
; CHECK-NEXT:    vsetvli a2, a1, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <64 x half>, <64 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v64f16(<64 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v64f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 64
; CHECK-NEXT:    vsetvli a2, a1, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <64 x half>, <64 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v64f16(half %s, <64 x half> %v)
  ret half %red
}

declare half @llvm.vector.reduce.fadd.v128f16(half, <128 x half>)

define half @vreduce_fadd_v128f16(<128 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_fadd_v128f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 64
; CHECK-NEXT:    vsetvli a2, a1, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetvli a0, a1, e16,m8,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.h fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <128 x half>, <128 x half>* %x
  %red = call reassoc half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v)
  ret half %red
}

define half @vreduce_ord_fadd_v128f16(<128 x half>* %x, half %s) {
; CHECK-LABEL: vreduce_ord_fadd_v128f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    addi a2, zero, 64
; CHECK-NEXT:    vsetvli a3, a2, e16,m8,ta,mu
; CHECK-NEXT:    vle16.v v8, (a1)
; CHECK-NEXT:    vle16.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v16, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, ft0
; CHECK-NEXT:    vsetvli a0, a2, e16,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <128 x half>, <128 x half>* %x
  %red = call half @llvm.vector.reduce.fadd.v128f16(half %s, <128 x half> %v)
  ret half %red
}

declare float @llvm.vector.reduce.fadd.v1f32(float, <1 x float>)

define float @vreduce_fadd_v1f32(<1 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <1 x float>, <1 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v1f32(<1 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 1, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 1, e32,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <1 x float>, <1 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v1f32(float %s, <1 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)

define float @vreduce_fadd_v2f32(<2 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vsetivli a0, 2, e32,m1,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <2 x float>, <2 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v2f32(<2 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 2, e32,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <2 x float>, <2 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v2f32(float %s, <2 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>)

define float @vreduce_fadd_v4f32(<4 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vsetivli a0, 4, e32,m1,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <4 x float>, <4 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v4f32(<4 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT:    vle32.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 4, e32,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <4 x float>, <4 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v4f32(float %s, <4 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)

define float @vreduce_fadd_v8f32(<8 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v26, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <8 x float>, <8 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v8f32(<8 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e32,m2,ta,mu
; CHECK-NEXT:    vle32.v v26, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v26, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <8 x float>, <8 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v8f32(float %s, <8 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>)

define float @vreduce_fadd_v16f32(<16 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e32,m4,ta,mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 16, e32,m4,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v28, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <16 x float>, <16 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v16f32(<16 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e32,m4,ta,mu
; CHECK-NEXT:    vle32.v v28, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 16, e32,m4,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v28, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <16 x float>, <16 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v16f32(float %s, <16 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v32f32(float, <32 x float>)

define float @vreduce_fadd_v32f32(<32 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 32
; CHECK-NEXT:    vsetvli a2, a1, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <32 x float>, <32 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v32f32(<32 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v32f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 32
; CHECK-NEXT:    vsetvli a2, a1, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <32 x float>, <32 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v32f32(float %s, <32 x float> %v)
  ret float %red
}

declare float @llvm.vector.reduce.fadd.v64f32(float, <64 x float>)

define float @vreduce_fadd_v64f32(<64 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_fadd_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, zero, 32
; CHECK-NEXT:    vsetvli a2, a1, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetvli a0, a1, e32,m8,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.s fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <64 x float>, <64 x float>* %x
  %red = call reassoc float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v)
  ret float %red
}

define float @vreduce_ord_fadd_v64f32(<64 x float>* %x, float %s) {
; CHECK-LABEL: vreduce_ord_fadd_v64f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    addi a2, zero, 32
; CHECK-NEXT:    vsetvli a3, a2, e32,m8,ta,mu
; CHECK-NEXT:    vle32.v v8, (a1)
; CHECK-NEXT:    vle32.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v16, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, ft0
; CHECK-NEXT:    vsetvli a0, a2, e32,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <64 x float>, <64 x float>* %x
  %red = call float @llvm.vector.reduce.fadd.v64f32(float %s, <64 x float> %v)
  ret float %red
}

declare double @llvm.vector.reduce.fadd.v1f64(double, <1 x double>)

define double @vreduce_fadd_v1f64(<1 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
; CHECK-NEXT:    vle64.v v25, (a0)
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.d fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <1 x double>, <1 x double>* %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v1f64(<1 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 1, e64,m1,ta,mu
; CHECK-NEXT:    vle64.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 1, e64,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <1 x double>, <1 x double>* %x
  %red = call double @llvm.vector.reduce.fadd.v1f64(double %s, <1 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)

define double @vreduce_fadd_v2f64(<2 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT:    vle64.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v26, 0
; CHECK-NEXT:    vsetivli a0, 2, e64,m1,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.d fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <2 x double>, <2 x double>* %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v2f64(<2 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT:    vle64.v v25, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v26, fa0
; CHECK-NEXT:    vsetivli a0, 2, e64,m1,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v25, v26
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <2 x double>, <2 x double>* %x
  %red = call double @llvm.vector.reduce.fadd.v2f64(double %s, <2 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)

define double @vreduce_fadd_v4f64(<4 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
; CHECK-NEXT:    vle64.v v26, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 4, e64,m2,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v26, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.d fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <4 x double>, <4 x double>* %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v4f64(<4 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 4, e64,m2,ta,mu
; CHECK-NEXT:    vle64.v v26, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 4, e64,m2,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v26, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <4 x double>, <4 x double>* %x
  %red = call double @llvm.vector.reduce.fadd.v4f64(double %s, <4 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)

define double @vreduce_fadd_v8f64(<8 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
; CHECK-NEXT:    vle64.v v28, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 8, e64,m4,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v28, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.d fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <8 x double>, <8 x double>* %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v8f64(<8 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 8, e64,m4,ta,mu
; CHECK-NEXT:    vle64.v v28, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 8, e64,m4,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v28, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <8 x double>, <8 x double>* %x
  %red = call double @llvm.vector.reduce.fadd.v8f64(double %s, <8 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>)

define double @vreduce_fadd_v16f64(<16 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e64,m8,ta,mu
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 16, e64,m8,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.d fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <16 x double>, <16 x double>* %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v16f64(<16 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v16f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e64,m8,ta,mu
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 16, e64,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <16 x double>, <16 x double>* %x
  %red = call double @llvm.vector.reduce.fadd.v16f64(double %s, <16 x double> %v)
  ret double %red
}

declare double @llvm.vector.reduce.fadd.v32f64(double, <32 x double>)

define double @vreduce_fadd_v32f64(<32 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_fadd_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli a1, 16, e64,m8,ta,mu
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    addi a0, a0, 128
; CHECK-NEXT:    vle64.v v16, (a0)
; CHECK-NEXT:    vfadd.vv v8, v8, v16
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vmv.v.i v25, 0
; CHECK-NEXT:    vsetivli a0, 16, e64,m8,ta,mu
; CHECK-NEXT:    vfredsum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    fadd.d fa0, fa0, ft0
; CHECK-NEXT:    ret
  %v = load <32 x double>, <32 x double>* %x
  %red = call reassoc double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v)
  ret double %red
}

define double @vreduce_ord_fadd_v32f64(<32 x double>* %x, double %s) {
; CHECK-LABEL: vreduce_ord_fadd_v32f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, 128
; CHECK-NEXT:    vsetivli a2, 16, e64,m8,ta,mu
; CHECK-NEXT:    vle64.v v8, (a1)
; CHECK-NEXT:    vle64.v v16, (a0)
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, fa0
; CHECK-NEXT:    vsetivli a0, 16, e64,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v16, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s ft0, v25
; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.v.f v25, ft0
; CHECK-NEXT:    vsetivli a0, 16, e64,m8,ta,mu
; CHECK-NEXT:    vfredosum.vs v25, v8, v25
; CHECK-NEXT:    vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT:    vfmv.f.s fa0, v25
; CHECK-NEXT:    ret
  %v = load <32 x double>, <32 x double>* %x
  %red = call double @llvm.vector.reduce.fadd.v32f64(double %s, <32 x double> %v)
  ret double %red
}