[SVE] Code generation for fixed length vector adds.

Summary:
Teach LowerToPredicatedOp to lower fixed length vector operations.

Add AArch64ISD nodes and isel patterns for predicated integer
and floating point adds.

Together this enables SVE code generation for fixed length vector adds.
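
For illustration, a minimal sketch of the kind of function this change targets
(taken from the new fadd_v8f32 test below; the exact output depends on the
value of -aarch64-sve-vector-bits-min):

  define void @fadd_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
    %op1 = load <8 x float>, <8 x float>* %a
    %op2 = load <8 x float>, <8 x float>* %b
    %res = fadd <8 x float> %op1, %op2
    store <8 x float> %res, <8 x float>* %a
    ret void
  }
  attributes #0 = { "target-features"="+sve" }

With -aarch64-sve-vector-bits-min=256 this is now selected as a single
predicated SVE sequence (ptrue/ld1w/fadd/st1w), as checked by the new tests.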

Reviewers: rengolin, efriedma

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D82483
Paul Walker 2020-06-20 20:23:31 +01:00
parent dffc142045
commit 3a98d5d7e7
7 changed files with 823 additions and 35 deletions

@@ -1041,13 +1041,10 @@ void AArch64TargetLowering::addTypeForFixedLengthSVE(MVT VT) {
setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
// Lower fixed length vector operations to scalable equivalents.
setOperationAction(ISD::ADD, VT, Custom);
setOperationAction(ISD::FADD, VT, Custom);
setOperationAction(ISD::LOAD, VT, Custom);
setOperationAction(ISD::STORE, VT, Custom);
// NOTE: This is a temporary measure to maintain functionality required by
// Analysis/CostModel/AArch64/sve-fixed-length.ll
setOperationAction(ISD::ADD, VT, Legal);
setOperationAction(ISD::FADD, VT, Legal);
}
void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
@@ -1354,6 +1351,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::CSINC: return "AArch64ISD::CSINC";
case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
case AArch64ISD::TLSDESC_CALLSEQ: return "AArch64ISD::TLSDESC_CALLSEQ";
case AArch64ISD::ADD_PRED: return "AArch64ISD::ADD_PRED";
case AArch64ISD::SDIV_PRED: return "AArch64ISD::SDIV_PRED";
case AArch64ISD::UDIV_PRED: return "AArch64ISD::UDIV_PRED";
case AArch64ISD::SMIN_PRED: return "AArch64ISD::SMIN_PRED";
@@ -1447,6 +1445,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
case AArch64ISD::REV: return "AArch64ISD::REV";
case AArch64ISD::REINTERPRET_CAST: return "AArch64ISD::REINTERPRET_CAST";
case AArch64ISD::TBL: return "AArch64ISD::TBL";
case AArch64ISD::FADD_PRED: return "AArch64ISD::FADD_PRED";
case AArch64ISD::FADDA_PRED: return "AArch64ISD::FADDA_PRED";
case AArch64ISD::FADDV_PRED: return "AArch64ISD::FADDV_PRED";
case AArch64ISD::FMAXV_PRED: return "AArch64ISD::FMAXV_PRED";
@@ -3425,6 +3424,8 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
case ISD::UMULO:
return LowerXALUO(Op, DAG);
case ISD::FADD:
if (useSVEForFixedLengthVectorVT(Op.getValueType()))
return LowerToPredicatedOp(Op, DAG, AArch64ISD::FADD_PRED);
return LowerF128Call(Op, DAG, RTLIB::ADD_F128);
case ISD::FSUB:
return LowerF128Call(Op, DAG, RTLIB::SUB_F128);
@@ -3527,7 +3528,11 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
case ISD::LOAD:
if (useSVEForFixedLengthVectorVT(Op.getValueType()))
return LowerFixedLengthVectorLoadToSVE(Op, DAG);
llvm_unreachable("Unexpected Load.");
llvm_unreachable("Unexpected request to lower ISD::LOAD");
case ISD::ADD:
if (useSVEForFixedLengthVectorVT(Op.getValueType()))
return LowerToPredicatedOp(Op, DAG, AArch64ISD::ADD_PRED);
llvm_unreachable("Unexpected request to lower ISD::ADD");
}
}
@@ -7853,24 +7858,6 @@ SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
}
SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
SelectionDAG &DAG,
unsigned NewOp) const {
EVT VT = Op.getValueType();
SDLoc DL(Op);
assert(Op.getOperand(0).getValueType().isScalableVector() &&
Op.getOperand(1).getValueType().isScalableVector() &&
"Only scalable vectors are supported");
auto PredTy =
VT.getVectorVT(*DAG.getContext(), MVT::i1, VT.getVectorElementCount());
SDValue Mask = getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
SmallVector<SDValue, 4> Operands = {Mask};
Operands.append(Op->op_begin(), Op->op_end());
return DAG.getNode(NewOp, DL, VT, Operands);
}
static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits,
APInt &UndefBits) {
@@ -14810,6 +14797,21 @@ static SDValue getPredicateForFixedLengthVector(SelectionDAG &DAG, SDLoc &DL,
DAG.getTargetConstant(PgPattern, DL, MVT::i64));
}
static SDValue getPredicateForScalableVector(SelectionDAG &DAG, SDLoc &DL,
EVT VT) {
assert(VT.isScalableVector() && DAG.getTargetLoweringInfo().isTypeLegal(VT) &&
"Expected legal scalable vector!");
auto PredTy = VT.changeVectorElementType(MVT::i1);
return getPTrue(DAG, DL, PredTy, AArch64SVEPredPattern::all);
}
static SDValue getPredicateForVector(SelectionDAG &DAG, SDLoc &DL, EVT VT) {
if (VT.isFixedLengthVector())
return getPredicateForFixedLengthVector(DAG, DL, VT);
return getPredicateForScalableVector(DAG, DL, VT);
}
// Grow V to consume an entire SVE register.
static SDValue convertToScalableVector(SelectionDAG &DAG, EVT VT, SDValue V) {
assert(VT.isScalableVector() &&
@@ -14868,3 +14870,42 @@ SDValue AArch64TargetLowering::LowerFixedLengthVectorStoreToSVE(
Store->getMemOperand(), Store->getAddressingMode(),
Store->isTruncatingStore());
}
SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
SelectionDAG &DAG,
unsigned NewOp) const {
EVT VT = Op.getValueType();
SDLoc DL(Op);
auto Pg = getPredicateForVector(DAG, DL, VT);
if (useSVEForFixedLengthVectorVT(VT)) {
EVT ContainerVT = getContainerForFixedLengthVector(DAG, VT);
// Create list of operands by converting existing ones to scalable types.
SmallVector<SDValue, 4> Operands = {Pg};
for (const SDValue &V : Op->op_values()) {
if (isa<CondCodeSDNode>(V)) {
Operands.push_back(V);
continue;
}
assert(useSVEForFixedLengthVectorVT(V.getValueType()) &&
"Only fixed length vectors are supported!");
Operands.push_back(convertToScalableVector(DAG, ContainerVT, V));
}
auto ScalableRes = DAG.getNode(NewOp, DL, ContainerVT, Operands);
return convertFromScalableVector(DAG, VT, ScalableRes);
}
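// Scalable vector case: the operands are already the right type, so just
// prepend the all-true governing predicate computed above.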
assert(VT.isScalableVector() && "Only expect to lower scalable vector op!");
SmallVector<SDValue, 4> Operands = {Pg};
for (const SDValue &V : Op->op_values()) {
assert((isa<CondCodeSDNode>(V) || V.getValueType().isScalableVector()) &&
"Only scalable vectors are supported!");
Operands.push_back(V);
}
return DAG.getNode(NewOp, DL, VT, Operands);
}

@@ -53,6 +53,8 @@ enum NodeType : unsigned {
SBC, // adc, sbc instructions
// Arithmetic instructions
ADD_PRED,
FADD_PRED,
SDIV_PRED,
UDIV_PRED,
SMIN_PRED,

@@ -152,6 +152,8 @@ def SDT_AArch64Arith : SDTypeProfile<1, 3, [
SDTCVecEltisVT<1,i1>, SDTCisSameAs<2,3>
]>;
def AArch64add_pred : SDNode<"AArch64ISD::ADD_PRED", SDT_AArch64Arith>;
def AArch64fadd_pred : SDNode<"AArch64ISD::FADD_PRED", SDT_AArch64Arith>;
def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
def AArch64smin_pred : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
@@ -204,7 +206,7 @@ let Predicates = [HasSVE] in {
defm SUB_ZPmZ : sve_int_bin_pred_arit_0<0b001, "sub", "SUB_ZPZZ", int_aarch64_sve_sub, DestructiveBinaryCommWithRev, "SUBR_ZPmZ", 1>;
defm SUBR_ZPmZ : sve_int_bin_pred_arit_0<0b011, "subr", "SUBR_ZPZZ", int_aarch64_sve_subr, DestructiveBinaryCommWithRev, "SUB_ZPmZ", 0>;
defm ADD_ZPZZ : sve_int_bin_pred_zx<int_aarch64_sve_add>;
defm ADD_ZPZZ : sve_int_bin_pred_zx<int_aarch64_sve_add, AArch64add_pred>;
defm SUB_ZPZZ : sve_int_bin_pred_zx<int_aarch64_sve_sub>;
defm SUBR_ZPZZ : sve_int_bin_pred_zx<int_aarch64_sve_subr>;
@@ -327,7 +329,7 @@ let Predicates = [HasSVE] in {
defm FDIVR_ZPmZ : sve_fp_2op_p_zds<0b1100, "fdivr", "FDIVR_ZPZZ", int_aarch64_sve_fdivr, DestructiveBinaryCommWithRev, "FDIV_ZPmZ", 0>;
defm FDIV_ZPmZ : sve_fp_2op_p_zds<0b1101, "fdiv", "FDIV_ZPZZ", int_aarch64_sve_fdiv, DestructiveBinaryCommWithRev, "FDIVR_ZPmZ", 1>;
defm FADD_ZPZZ : sve_fp_2op_p_zds_zx<int_aarch64_sve_fadd>;
defm FADD_ZPZZ : sve_fp_2op_p_zds_zx<int_aarch64_sve_fadd, AArch64fadd_pred>;
defm FSUB_ZPZZ : sve_fp_2op_p_zds_zx<int_aarch64_sve_fsub>;
defm FMUL_ZPZZ : sve_fp_2op_p_zds_zx<int_aarch64_sve_fmul>;
defm FSUBR_ZPZZ : sve_fp_2op_p_zds_zx<int_aarch64_sve_fsubr>;

@@ -601,6 +601,16 @@ int AArch64TTIImpl::getArithmeticInstrCost(
// These nodes are marked as 'custom' for combining purposes only.
// We know that they are legal. See LowerAdd in ISelLowering.
return (Cost + 1) * LT.first;
case ISD::FADD:
// These nodes are marked as 'custom' just to lower them to SVE.
// We know said lowering will incur no additional cost.
if (isa<FixedVectorType>(Ty) && !Ty->getScalarType()->isFP128Ty())
return (Cost + 2) * LT.first;
return Cost + BaseT::getArithmeticInstrCost(Opcode, Ty, CostKind, Opd1Info,
Opd2Info,
Opd1PropInfo, Opd2PropInfo);
}
}

@@ -1596,14 +1596,23 @@ multiclass sve_fp_2op_p_zds_fscale<bits<4> opc, string asm,
def : SVE_3_Op_Pat<nxv2f64, op, nxv2i1, nxv2f64, nxv2i64, !cast<Instruction>(NAME # _D)>;
}
multiclass sve_fp_2op_p_zds_zx<SDPatternOperator op> {
multiclass sve_fp_2op_p_zds_zx<SDPatternOperator int_op,
SDPatternOperator ir_op = null_frag> {
def _UNDEF_H : PredTwoOpPseudo<NAME # _H, ZPR16, FalseLanesUndef>;
def _UNDEF_S : PredTwoOpPseudo<NAME # _S, ZPR32, FalseLanesUndef>;
def _UNDEF_D : PredTwoOpPseudo<NAME # _D, ZPR64, FalseLanesUndef>;
def _ZERO_H : PredTwoOpPseudo<NAME # _H, ZPR16, FalseLanesZero>;
def _ZERO_S : PredTwoOpPseudo<NAME # _S, ZPR32, FalseLanesZero>;
def _ZERO_D : PredTwoOpPseudo<NAME # _D, ZPR64, FalseLanesZero>;
def : SVE_3_Op_Pat_SelZero<nxv8f16, op, nxv8i1, nxv8f16, nxv8f16, !cast<Pseudo>(NAME # _ZERO_H)>;
def : SVE_3_Op_Pat_SelZero<nxv4f32, op, nxv4i1, nxv4f32, nxv4f32, !cast<Pseudo>(NAME # _ZERO_S)>;
def : SVE_3_Op_Pat_SelZero<nxv2f64, op, nxv2i1, nxv2f64, nxv2f64, !cast<Pseudo>(NAME # _ZERO_D)>;
def : SVE_3_Op_Pat<nxv8f16, ir_op, nxv8i1, nxv8f16, nxv8f16, !cast<Pseudo>(NAME # _UNDEF_H)>;
def : SVE_3_Op_Pat<nxv4f32, ir_op, nxv4i1, nxv4f32, nxv4f32, !cast<Pseudo>(NAME # _UNDEF_S)>;
def : SVE_3_Op_Pat<nxv2f64, ir_op, nxv2i1, nxv2f64, nxv2f64, !cast<Pseudo>(NAME # _UNDEF_D)>;
def : SVE_3_Op_Pat_SelZero<nxv8f16, int_op, nxv8i1, nxv8f16, nxv8f16, !cast<Pseudo>(NAME # _ZERO_H)>;
def : SVE_3_Op_Pat_SelZero<nxv4f32, int_op, nxv4i1, nxv4f32, nxv4f32, !cast<Pseudo>(NAME # _ZERO_S)>;
def : SVE_3_Op_Pat_SelZero<nxv2f64, int_op, nxv2i1, nxv2f64, nxv2f64, !cast<Pseudo>(NAME # _ZERO_D)>;
}
class sve_fp_ftmad<bits<2> sz, string asm, ZPRRegOp zprty>
@@ -4856,16 +4865,27 @@ multiclass sve_int_bin_pred_shift<bits<3> opc, string asm, string Ps,
def : SVE_3_Op_Pat<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, !cast<Instruction>(NAME # _D)>;
}
multiclass sve_int_bin_pred_zx<SDPatternOperator op> {
multiclass sve_int_bin_pred_zx<SDPatternOperator int_op,
SDPatternOperator ir_op = null_frag> {
def _UNDEF_B : PredTwoOpPseudo<NAME # _B, ZPR8, FalseLanesUndef>;
def _UNDEF_H : PredTwoOpPseudo<NAME # _H, ZPR16, FalseLanesUndef>;
def _UNDEF_S : PredTwoOpPseudo<NAME # _S, ZPR32, FalseLanesUndef>;
def _UNDEF_D : PredTwoOpPseudo<NAME # _D, ZPR64, FalseLanesUndef>;
def _ZERO_B : PredTwoOpPseudo<NAME # _B, ZPR8, FalseLanesZero>;
def _ZERO_H : PredTwoOpPseudo<NAME # _H, ZPR16, FalseLanesZero>;
def _ZERO_S : PredTwoOpPseudo<NAME # _S, ZPR32, FalseLanesZero>;
def _ZERO_D : PredTwoOpPseudo<NAME # _D, ZPR64, FalseLanesZero>;
def : SVE_3_Op_Pat_SelZero<nxv16i8, op, nxv16i1, nxv16i8, nxv16i8, !cast<Pseudo>(NAME # _ZERO_B)>;
def : SVE_3_Op_Pat_SelZero<nxv8i16, op, nxv8i1, nxv8i16, nxv8i16, !cast<Pseudo>(NAME # _ZERO_H)>;
def : SVE_3_Op_Pat_SelZero<nxv4i32, op, nxv4i1, nxv4i32, nxv4i32, !cast<Pseudo>(NAME # _ZERO_S)>;
def : SVE_3_Op_Pat_SelZero<nxv2i64, op, nxv2i1, nxv2i64, nxv2i64, !cast<Pseudo>(NAME # _ZERO_D)>;
def : SVE_3_Op_Pat<nxv16i8, ir_op, nxv16i1, nxv16i8, nxv16i8, !cast<Pseudo>(NAME # _UNDEF_B)>;
def : SVE_3_Op_Pat<nxv8i16, ir_op, nxv8i1, nxv8i16, nxv8i16, !cast<Pseudo>(NAME # _UNDEF_H)>;
def : SVE_3_Op_Pat<nxv4i32, ir_op, nxv4i1, nxv4i32, nxv4i32, !cast<Pseudo>(NAME # _UNDEF_S)>;
def : SVE_3_Op_Pat<nxv2i64, ir_op, nxv2i1, nxv2i64, nxv2i64, !cast<Pseudo>(NAME # _UNDEF_D)>;
def : SVE_3_Op_Pat_SelZero<nxv16i8, int_op, nxv16i1, nxv16i8, nxv16i8, !cast<Pseudo>(NAME # _ZERO_B)>;
def : SVE_3_Op_Pat_SelZero<nxv8i16, int_op, nxv8i1, nxv8i16, nxv8i16, !cast<Pseudo>(NAME # _ZERO_H)>;
def : SVE_3_Op_Pat_SelZero<nxv4i32, int_op, nxv4i1, nxv4i32, nxv4i32, !cast<Pseudo>(NAME # _ZERO_S)>;
def : SVE_3_Op_Pat_SelZero<nxv2i64, int_op, nxv2i1, nxv2i64, nxv2i64, !cast<Pseudo>(NAME # _ZERO_D)>;
}
multiclass sve_int_bin_pred_shift_wide<bits<3> opc, string asm,

@@ -0,0 +1,300 @@
; RUN: llc -aarch64-sve-vector-bits-min=128 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512,VBITS_LE_256
; RUN: llc -aarch64-sve-vector-bits-min=384 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512,VBITS_LE_256
; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=640 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=768 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=896 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK
; VBYTES represents the useful byte size of a vector register from the code
; generator's point of view. It is clamped to power-of-2 values because
; only power-of-2 vector lengths are considered legal, regardless of the
; user specified vector length.
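; For example, the -aarch64-sve-vector-bits-min=384 RUN line above uses
; -D#VBYTES=32 because 384 is rounded down to the 256-bit power of two
; (256 bits / 8 = 32 bytes).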
target triple = "aarch64-unknown-linux-gnu"
; Don't use SVE when its registers are no bigger than NEON.
; NO_SVE-NOT: ptrue
; Don't use SVE for 64-bit vectors.
define <4 x half> @fadd_v4f16(<4 x half> %op1, <4 x half> %op2) #0 {
; CHECK-LABEL: @fadd_v4f16
; CHECK: fadd v0.4h, v0.4h, v1.4h
; CHECK: ret
%res = fadd <4 x half> %op1, %op2
ret <4 x half> %res
}
; Don't use SVE for 128-bit vectors.
define <8 x half> @fadd_v8f16(<8 x half> %op1, <8 x half> %op2) #0 {
; CHECK-LABEL: @fadd_v8f16
; CHECK: fadd v0.8h, v0.8h, v1.8h
; CHECK: ret
%res = fadd <8 x half> %op1, %op2
ret <8 x half> %res
}
define void @fadd_v16f16(<16 x half>* %a, <16 x half>* %b) #0 {
; CHECK-LABEL: @fadd_v16f16
; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
; CHECK: ret
%op1 = load <16 x half>, <16 x half>* %a
%op2 = load <16 x half>, <16 x half>* %b
%res = fadd <16 x half> %op1, %op2
store <16 x half> %res, <16 x half>* %a
ret void
}
define void @fadd_v32f16(<32 x half>* %a, <32 x half>* %b) #0 {
; CHECK-LABEL: @fadd_v32f16
; CHECK-DAG: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK-DAG: fadd [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK-DAG: st1h { [[RES]].h }, [[PG]], [x0]
; VBITS_LE_256-DAG: add x[[A1:[0-9]+]], x0, #[[#VBYTES]]
; VBITS_LE_256-DAG: add x[[B1:[0-9]+]], x1, #[[#VBYTES]]
; VBITS_LE_256-DAG: ld1h { [[OP1_1:z[0-9]+]].h }, [[PG]]/z, [x[[A1]]]
; VBITS_LE_256-DAG: ld1h { [[OP2_1:z[0-9]+]].h }, [[PG]]/z, [x[[B1]]]
; VBITS_LE_256-DAG: fadd [[RES_1:z[0-9]+]].h, [[PG]]/m, [[OP1_1]].h, [[OP2_1]].h
; VBITS_LE_256-DAG: st1h { [[RES_1]].h }, [[PG]], [x[[A1]]]
; CHECK: ret
%op1 = load <32 x half>, <32 x half>* %a
%op2 = load <32 x half>, <32 x half>* %b
%res = fadd <32 x half> %op1, %op2
store <32 x half> %res, <32 x half>* %a
ret void
}
define void @fadd_v64f16(<64 x half>* %a, <64 x half>* %b) #0 {
; CHECK-LABEL: @fadd_v64f16
; CHECK-DAG: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK-DAG: fadd [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK-DAG: st1h { [[RES]].h }, [[PG]], [x0]
; VBITS_LE_512-DAG: add x[[A1:[0-9]+]], x0, #[[#VBYTES]]
; VBITS_LE_512-DAG: add x[[B1:[0-9]+]], x1, #[[#VBYTES]]
; VBITS_LE_512-DAG: ld1h { [[OP1_1:z[0-9]+]].h }, [[PG]]/z, [x[[A1]]]
; VBITS_LE_512-DAG: ld1h { [[OP2_1:z[0-9]+]].h }, [[PG]]/z, [x[[B1]]]
; VBITS_LE_512-DAG: fadd [[RES_1:z[0-9]+]].h, [[PG]]/m, [[OP1_1]].h, [[OP2_1]].h
; VBITS_LE_512-DAG: st1h { [[RES_1]].h }, [[PG]], [x[[A1]]]
; VBITS_LE_256-DAG: add x[[A2:[0-9]+]], x0, #[[#mul(VBYTES,2)]]
; VBITS_LE_256-DAG: add x[[B2:[0-9]+]], x1, #[[#mul(VBYTES,2)]]
; VBITS_LE_256-DAG: ld1h { [[OP1_2:z[0-9]+]].h }, [[PG]]/z, [x[[A2]]]
; VBITS_LE_256-DAG: ld1h { [[OP2_2:z[0-9]+]].h }, [[PG]]/z, [x[[B2]]]
; VBITS_LE_256-DAG: fadd [[RES_2:z[0-9]+]].h, [[PG]]/m, [[OP1_2]].h, [[OP2_2]].h
; VBITS_LE_256-DAG: st1h { [[RES_2]].h }, [[PG]], [x[[A2]]]
; VBITS_LE_256-DAG: add x[[A3:[0-9]+]], x0, #[[#mul(VBYTES,3)]]
; VBITS_LE_256-DAG: add x[[B3:[0-9]+]], x1, #[[#mul(VBYTES,3)]]
; VBITS_LE_256-DAG: ld1h { [[OP1_3:z[0-9]+]].h }, [[PG]]/z, [x[[A3]]]
; VBITS_LE_256-DAG: ld1h { [[OP2_3:z[0-9]+]].h }, [[PG]]/z, [x[[B3]]]
; VBITS_LE_256-DAG: fadd [[RES_3:z[0-9]+]].h, [[PG]]/m, [[OP1_3]].h, [[OP2_3]].h
; VBITS_LE_256-DAG: st1h { [[RES_3]].h }, [[PG]], [x[[A3]]]
; CHECK: ret
%op1 = load <64 x half>, <64 x half>* %a
%op2 = load <64 x half>, <64 x half>* %b
%res = fadd <64 x half> %op1, %op2
store <64 x half> %res, <64 x half>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v128f16(<128 x half>* %a, <128 x half>* %b) #0 {
; CHECK-LABEL: @fadd_v128f16
; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
; CHECK: ret
%op1 = load <128 x half>, <128 x half>* %a
%op2 = load <128 x half>, <128 x half>* %b
%res = fadd <128 x half> %op1, %op2
store <128 x half> %res, <128 x half>* %a
ret void
}
; Don't use SVE for 64-bit vectors.
define <2 x float> @fadd_v2f32(<2 x float> %op1, <2 x float> %op2) #0 {
; CHECK-LABEL: @fadd_v2f32
; CHECK: fadd v0.2s, v0.2s, v1.2s
; CHECK: ret
%res = fadd <2 x float> %op1, %op2
ret <2 x float> %res
}
; Don't use SVE for 128-bit vectors.
define <4 x float> @fadd_v4f32(<4 x float> %op1, <4 x float> %op2) #0 {
; CHECK-LABEL: @fadd_v4f32
; CHECK: fadd v0.4s, v0.4s, v1.4s
; CHECK: ret
%res = fadd <4 x float> %op1, %op2
ret <4 x float> %res
}
define void @fadd_v8f32(<8 x float>* %a, <8 x float>* %b) #0 {
; CHECK-LABEL: @fadd_v8f32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <8 x float>, <8 x float>* %a
%op2 = load <8 x float>, <8 x float>* %b
%res = fadd <8 x float> %op1, %op2
store <8 x float> %res, <8 x float>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v16f32(<16 x float>* %a, <16 x float>* %b) #0 {
; CHECK-LABEL: @fadd_v16f32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <16 x float>, <16 x float>* %a
%op2 = load <16 x float>, <16 x float>* %b
%res = fadd <16 x float> %op1, %op2
store <16 x float> %res, <16 x float>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v32f32(<32 x float>* %a, <32 x float>* %b) #0 {
; CHECK-LABEL: @fadd_v32f32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <32 x float>, <32 x float>* %a
%op2 = load <32 x float>, <32 x float>* %b
%res = fadd <32 x float> %op1, %op2
store <32 x float> %res, <32 x float>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v64f32(<64 x float>* %a, <64 x float>* %b) #0 {
; CHECK-LABEL: @fadd_v64f32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <64 x float>, <64 x float>* %a
%op2 = load <64 x float>, <64 x float>* %b
%res = fadd <64 x float> %op1, %op2
store <64 x float> %res, <64 x float>* %a
ret void
}
; Don't use SVE for 64-bit vectors.
define <1 x double> @fadd_v1f64(<1 x double> %op1, <1 x double> %op2) #0 {
; CHECK-LABEL: @fadd_v1f64
; CHECK: fadd d0, d0, d1
; CHECK: ret
%res = fadd <1 x double> %op1, %op2
ret <1 x double> %res
}
; Don't use SVE for 128-bit vectors.
define <2 x double> @fadd_v2f64(<2 x double> %op1, <2 x double> %op2) #0 {
; CHECK-LABEL: @fadd_v2f64
; CHECK: fadd v0.2d, v0.2d, v1.2d
; CHECK: ret
%res = fadd <2 x double> %op1, %op2
ret <2 x double> %res
}
define void @fadd_v4f64(<4 x double>* %a, <4 x double>* %b) #0 {
; CHECK-LABEL: @fadd_v4f64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <4 x double>, <4 x double>* %a
%op2 = load <4 x double>, <4 x double>* %b
%res = fadd <4 x double> %op1, %op2
store <4 x double> %res, <4 x double>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v8f64(<8 x double>* %a, <8 x double>* %b) #0 {
; CHECK-LABEL: @fadd_v8f64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <8 x double>, <8 x double>* %a
%op2 = load <8 x double>, <8 x double>* %b
%res = fadd <8 x double> %op1, %op2
store <8 x double> %res, <8 x double>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v16f64(<16 x double>* %a, <16 x double>* %b) #0 {
; CHECK-LABEL: @fadd_v16f64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <16 x double>, <16 x double>* %a
%op2 = load <16 x double>, <16 x double>* %b
%res = fadd <16 x double> %op1, %op2
store <16 x double> %res, <16 x double>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the fadd_v#f16 tests
; already cover the general legalisation cases.
define void @fadd_v32f64(<32 x double>* %a, <32 x double>* %b) #0 {
; CHECK-LABEL: @fadd_v32f64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: fadd [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <32 x double>, <32 x double>* %a
%op2 = load <32 x double>, <32 x double>* %b
%res = fadd <32 x double> %op1, %op2
store <32 x double> %res, <32 x double>* %a
ret void
}
attributes #0 = { "target-features"="+sve" }

@@ -0,0 +1,413 @@
; RUN: llc -aarch64-sve-vector-bits-min=128 < %s | FileCheck %s -D#VBYTES=16 -check-prefix=NO_SVE
; RUN: llc -aarch64-sve-vector-bits-min=256 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512,VBITS_LE_256
; RUN: llc -aarch64-sve-vector-bits-min=384 < %s | FileCheck %s -D#VBYTES=32 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512,VBITS_LE_256
; RUN: llc -aarch64-sve-vector-bits-min=512 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=640 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=768 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=896 < %s | FileCheck %s -D#VBYTES=64 -check-prefixes=CHECK,VBITS_LE_1024,VBITS_LE_512
; RUN: llc -aarch64-sve-vector-bits-min=1024 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1152 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1280 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1408 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1536 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1664 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1792 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=1920 < %s | FileCheck %s -D#VBYTES=128 -check-prefixes=CHECK,VBITS_LE_1024
; RUN: llc -aarch64-sve-vector-bits-min=2048 < %s | FileCheck %s -D#VBYTES=256 -check-prefixes=CHECK
; VBYTES represents the useful byte size of a vector register from the code
; generator's point of view. It is clamped to power-of-2 values because
; only power-of-2 vector lengths are considered legal, regardless of the
; user specified vector length.
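; For example, the -aarch64-sve-vector-bits-min=1920 RUN line above uses
; -D#VBYTES=128 because 1920 is rounded down to the 1024-bit power of two
; (1024 bits / 8 = 128 bytes).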
target triple = "aarch64-unknown-linux-gnu"
; Don't use SVE when its registers are no bigger than NEON.
; NO_SVE-NOT: ptrue
; Don't use SVE for 64-bit vectors.
define <8 x i8> @add_v8i8(<8 x i8> %op1, <8 x i8> %op2) #0 {
; CHECK-LABEL: @add_v8i8
; CHECK: add v0.8b, v0.8b, v1.8b
; CHECK: ret
%res = add <8 x i8> %op1, %op2
ret <8 x i8> %res
}
; Don't use SVE for 128-bit vectors.
define <16 x i8> @add_v16i8(<16 x i8> %op1, <16 x i8> %op2) #0 {
; CHECK-LABEL: @add_v16i8
; CHECK: add v0.16b, v0.16b, v1.16b
; CHECK: ret
%res = add <16 x i8> %op1, %op2
ret <16 x i8> %res
}
define void @add_v32i8(<32 x i8>* %a, <32 x i8>* %b) #0 {
; CHECK-LABEL: @add_v32i8
; CHECK: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,32)]]
; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
; CHECK: st1b { [[RES]].b }, [[PG]], [x0]
; CHECK: ret
%op1 = load <32 x i8>, <32 x i8>* %a
%op2 = load <32 x i8>, <32 x i8>* %b
%res = add <32 x i8> %op1, %op2
store <32 x i8> %res, <32 x i8>* %a
ret void
}
define void @add_v64i8(<64 x i8>* %a, <64 x i8>* %b) #0 {
; CHECK-LABEL: @add_v64i8
; CHECK-DAG: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,64)]]
; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
; CHECK-DAG: add [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
; CHECK-DAG: st1b { [[RES]].b }, [[PG]], [x0]
; VBITS_LE_256-DAG: mov w[[OFF_1:[0-9]+]], #[[#VBYTES]]
; VBITS_LE_256-DAG: ld1b { [[OP1_1:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_1]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_1:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_1]]]
; VBITS_LE_256-DAG: add [[RES_1:z[0-9]+]].b, [[PG]]/m, [[OP1_1]].b, [[OP2_1]].b
; VBITS_LE_256-DAG: st1b { [[RES_1]].b }, [[PG]], [x0, x[[OFF_1]]]
; CHECK: ret
%op1 = load <64 x i8>, <64 x i8>* %a
%op2 = load <64 x i8>, <64 x i8>* %b
%res = add <64 x i8> %op1, %op2
store <64 x i8> %res, <64 x i8>* %a
ret void
}
define void @add_v128i8(<128 x i8>* %a, <128 x i8>* %b) #0 {
; CHECK-LABEL: @add_v128i8
; CHECK-DAG: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,128)]]
; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
; CHECK-DAG: add [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
; CHECK-DAG: st1b { [[RES]].b }, [[PG]], [x0]
; VBITS_LE_512-DAG: mov w[[OFF_1:[0-9]+]], #[[#VBYTES]]
; VBITS_LE_512-DAG: ld1b { [[OP1_1:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_1]]]
; VBITS_LE_512-DAG: ld1b { [[OP2_1:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_1]]]
; VBITS_LE_512-DAG: add [[RES_1:z[0-9]+]].b, [[PG]]/m, [[OP1_1]].b, [[OP2_1]].b
; VBITS_LE_512-DAG: st1b { [[RES_1]].b }, [[PG]], [x0, x[[OFF_1]]]
; VBITS_LE_256-DAG: mov w[[OFF_2:[0-9]+]], #[[#mul(VBYTES,2)]]
; VBITS_LE_256-DAG: ld1b { [[OP1_2:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_2]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_2:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_2]]]
; VBITS_LE_256-DAG: add [[RES_2:z[0-9]+]].b, [[PG]]/m, [[OP1_2]].b, [[OP2_2]].b
; VBITS_LE_256-DAG: st1b { [[RES_2]].b }, [[PG]], [x0, x[[OFF_2]]]
; VBITS_LE_256-DAG: mov w[[OFF_3:[0-9]+]], #[[#mul(VBYTES,3)]]
; VBITS_LE_256-DAG: ld1b { [[OP1_3:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_3]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_3:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_3]]]
; VBITS_LE_256-DAG: add [[RES_3:z[0-9]+]].b, [[PG]]/m, [[OP1_3]].b, [[OP2_3]].b
; VBITS_LE_256-DAG: st1b { [[RES_3]].b }, [[PG]], [x0, x[[OFF_3]]]
; CHECK: ret
%op1 = load <128 x i8>, <128 x i8>* %a
%op2 = load <128 x i8>, <128 x i8>* %b
%res = add <128 x i8> %op1, %op2
store <128 x i8> %res, <128 x i8>* %a
ret void
}
define void @add_v256i8(<256 x i8>* %a, <256 x i8>* %b) #0 {
; CHECK-LABEL: @add_v256i8
; CHECK-DAG: ptrue [[PG:p[0-9]+]].b, vl[[#min(VBYTES,256)]]
; CHECK-DAG: ld1b { [[OP1:z[0-9]+]].b }, [[PG]]/z, [x0]
; CHECK-DAG: ld1b { [[OP2:z[0-9]+]].b }, [[PG]]/z, [x1]
; CHECK-DAG: add [[RES:z[0-9]+]].b, [[PG]]/m, [[OP1]].b, [[OP2]].b
; CHECK-DAG: st1b { [[RES]].b }, [[PG]], [x0]
; VBITS_LE_1024-DAG: mov w[[OFF_1:[0-9]+]], #[[#VBYTES]]
; VBITS_LE_1024-DAG: ld1b { [[OP1_1:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_1]]]
; VBITS_LE_1024-DAG: ld1b { [[OP2_1:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_1]]]
; VBITS_LE_1024-DAG: add [[RES_1:z[0-9]+]].b, [[PG]]/m, [[OP1_1]].b, [[OP2_1]].b
; VBITS_LE_1024-DAG: st1b { [[RES_1]].b }, [[PG]], [x0, x[[OFF_1]]]
; VBITS_LE_512-DAG: mov w[[OFF_2:[0-9]+]], #[[#mul(VBYTES,2)]]
; VBITS_LE_512-DAG: ld1b { [[OP1_2:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_2]]]
; VBITS_LE_512-DAG: ld1b { [[OP2_2:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_2]]]
; VBITS_LE_512-DAG: add [[RES_2:z[0-9]+]].b, [[PG]]/m, [[OP1_2]].b, [[OP2_2]].b
; VBITS_LE_512-DAG: st1b { [[RES_2]].b }, [[PG]], [x0, x[[OFF_2]]]
; VBITS_LE_512-DAG: mov w[[OFF_3:[0-9]+]], #[[#mul(VBYTES,3)]]
; VBITS_LE_512-DAG: ld1b { [[OP1_3:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_3]]]
; VBITS_LE_512-DAG: ld1b { [[OP2_3:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_3]]]
; VBITS_LE_512-DAG: add [[RES_3:z[0-9]+]].b, [[PG]]/m, [[OP1_3]].b, [[OP2_3]].b
; VBITS_LE_512-DAG: st1b { [[RES_3]].b }, [[PG]], [x0, x[[OFF_3]]]
; VBITS_LE_256-DAG: mov w[[OFF_4:[0-9]+]], #[[#mul(VBYTES,4)]]
; VBITS_LE_256-DAG: ld1b { [[OP1_4:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_4]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_4:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_4]]]
; VBITS_LE_256-DAG: add [[RES_4:z[0-9]+]].b, [[PG]]/m, [[OP1_4]].b, [[OP2_4]].b
; VBITS_LE_256-DAG: st1b { [[RES_4]].b }, [[PG]], [x0, x[[OFF_4]]]
; VBITS_LE_256-DAG: mov w[[OFF_5:[0-9]+]], #[[#mul(VBYTES,5)]]
; VBITS_LE_256-DAG: ld1b { [[OP1_5:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_5]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_5:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_5]]]
; VBITS_LE_256-DAG: add [[RES_5:z[0-9]+]].b, [[PG]]/m, [[OP1_5]].b, [[OP2_5]].b
; VBITS_LE_256-DAG: st1b { [[RES_5]].b }, [[PG]], [x0, x[[OFF_5]]]
; VBITS_LE_256-DAG: mov w[[OFF_6:[0-9]+]], #[[#mul(VBYTES,6)]]
; VBITS_LE_256-DAG: ld1b { [[OP1_6:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_6]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_6:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_6]]]
; VBITS_LE_256-DAG: add [[RES_6:z[0-9]+]].b, [[PG]]/m, [[OP1_6]].b, [[OP2_6]].b
; VBITS_LE_256-DAG: st1b { [[RES_6]].b }, [[PG]], [x0, x[[OFF_6]]]
; VBITS_LE_256-DAG: mov w[[OFF_7:[0-9]+]], #[[#mul(VBYTES,7)]]
; VBITS_LE_256-DAG: ld1b { [[OP1_7:z[0-9]+]].b }, [[PG]]/z, [x0, x[[OFF_7]]]
; VBITS_LE_256-DAG: ld1b { [[OP2_7:z[0-9]+]].b }, [[PG]]/z, [x1, x[[OFF_7]]]
; VBITS_LE_256-DAG: add [[RES_7:z[0-9]+]].b, [[PG]]/m, [[OP1_7]].b, [[OP2_7]].b
; VBITS_LE_256-DAG: st1b { [[RES_7]].b }, [[PG]], [x0, x[[OFF_7]]]
; CHECK: ret
%op1 = load <256 x i8>, <256 x i8>* %a
%op2 = load <256 x i8>, <256 x i8>* %b
%res = add <256 x i8> %op1, %op2
store <256 x i8> %res, <256 x i8>* %a
ret void
}
; Don't use SVE for 64-bit vectors.
define <4 x i16> @add_v4i16(<4 x i16> %op1, <4 x i16> %op2) #0 {
; CHECK-LABEL: @add_v4i16
; CHECK: add v0.4h, v0.4h, v1.4h
; CHECK: ret
%res = add <4 x i16> %op1, %op2
ret <4 x i16> %res
}
; Don't use SVE for 128-bit vectors.
define <8 x i16> @add_v8i16(<8 x i16> %op1, <8 x i16> %op2) #0 {
; CHECK-LABEL: @add_v8i16
; CHECK: add v0.8h, v0.8h, v1.8h
; CHECK: ret
%res = add <8 x i16> %op1, %op2
ret <8 x i16> %res
}
define void @add_v16i16(<16 x i16>* %a, <16 x i16>* %b) #0 {
; CHECK-LABEL: @add_v16i16
; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),16)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
; CHECK: ret
%op1 = load <16 x i16>, <16 x i16>* %a
%op2 = load <16 x i16>, <16 x i16>* %b
%res = add <16 x i16> %op1, %op2
store <16 x i16> %res, <16 x i16>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v32i16(<32 x i16>* %a, <32 x i16>* %b) #0 {
; CHECK-LABEL: @add_v32i16
; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),32)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
; CHECK: ret
%op1 = load <32 x i16>, <32 x i16>* %a
%op2 = load <32 x i16>, <32 x i16>* %b
%res = add <32 x i16> %op1, %op2
store <32 x i16> %res, <32 x i16>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v64i16(<64 x i16>* %a, <64 x i16>* %b) #0 {
; CHECK-LABEL: @add_v64i16
; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),64)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
; CHECK: ret
%op1 = load <64 x i16>, <64 x i16>* %a
%op2 = load <64 x i16>, <64 x i16>* %b
%res = add <64 x i16> %op1, %op2
store <64 x i16> %res, <64 x i16>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v128i16(<128 x i16>* %a, <128 x i16>* %b) #0 {
; CHECK-LABEL: @add_v128i16
; CHECK: ptrue [[PG:p[0-9]+]].h, vl[[#min(div(VBYTES,2),128)]]
; CHECK-DAG: ld1h { [[OP1:z[0-9]+]].h }, [[PG]]/z, [x0]
; CHECK-DAG: ld1h { [[OP2:z[0-9]+]].h }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].h, [[PG]]/m, [[OP1]].h, [[OP2]].h
; CHECK: st1h { [[RES]].h }, [[PG]], [x0]
; CHECK: ret
%op1 = load <128 x i16>, <128 x i16>* %a
%op2 = load <128 x i16>, <128 x i16>* %b
%res = add <128 x i16> %op1, %op2
store <128 x i16> %res, <128 x i16>* %a
ret void
}
; Don't use SVE for 64-bit vectors.
define <2 x i32> @add_v2i32(<2 x i32> %op1, <2 x i32> %op2) #0 {
; CHECK-LABEL: @add_v2i32
; CHECK: add v0.2s, v0.2s, v1.2s
; CHECK: ret
%res = add <2 x i32> %op1, %op2
ret <2 x i32> %res
}
; Don't use SVE for 128-bit vectors.
define <4 x i32> @add_v4i32(<4 x i32> %op1, <4 x i32> %op2) #0 {
; CHECK-LABEL: @add_v4i32
; CHECK: add v0.4s, v0.4s, v1.4s
; CHECK: ret
%res = add <4 x i32> %op1, %op2
ret <4 x i32> %res
}
define void @add_v8i32(<8 x i32>* %a, <8 x i32>* %b) #0 {
; CHECK-LABEL: @add_v8i32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),8)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <8 x i32>, <8 x i32>* %a
%op2 = load <8 x i32>, <8 x i32>* %b
%res = add <8 x i32> %op1, %op2
store <8 x i32> %res, <8 x i32>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v16i32(<16 x i32>* %a, <16 x i32>* %b) #0 {
; CHECK-LABEL: @add_v16i32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),16)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <16 x i32>, <16 x i32>* %a
%op2 = load <16 x i32>, <16 x i32>* %b
%res = add <16 x i32> %op1, %op2
store <16 x i32> %res, <16 x i32>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v32i32(<32 x i32>* %a, <32 x i32>* %b) #0 {
; CHECK-LABEL: @add_v32i32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),32)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <32 x i32>, <32 x i32>* %a
%op2 = load <32 x i32>, <32 x i32>* %b
%res = add <32 x i32> %op1, %op2
store <32 x i32> %res, <32 x i32>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v64i32(<64 x i32>* %a, <64 x i32>* %b) #0 {
; CHECK-LABEL: @add_v64i32
; CHECK: ptrue [[PG:p[0-9]+]].s, vl[[#min(div(VBYTES,4),64)]]
; CHECK-DAG: ld1w { [[OP1:z[0-9]+]].s }, [[PG]]/z, [x0]
; CHECK-DAG: ld1w { [[OP2:z[0-9]+]].s }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].s, [[PG]]/m, [[OP1]].s, [[OP2]].s
; CHECK: st1w { [[RES]].s }, [[PG]], [x0]
; CHECK: ret
%op1 = load <64 x i32>, <64 x i32>* %a
%op2 = load <64 x i32>, <64 x i32>* %b
%res = add <64 x i32> %op1, %op2
store <64 x i32> %res, <64 x i32>* %a
ret void
}
; Don't use SVE for 64-bit vectors.
define <1 x i64> @add_v1i64(<1 x i64> %op1, <1 x i64> %op2) #0 {
; CHECK-LABEL: @add_v1i64
; CHECK: add d0, d0, d1
; CHECK: ret
%res = add <1 x i64> %op1, %op2
ret <1 x i64> %res
}
; Don't use SVE for 128-bit vectors.
define <2 x i64> @add_v2i64(<2 x i64> %op1, <2 x i64> %op2) #0 {
; CHECK-LABEL: @add_v2i64
; CHECK: add v0.2d, v0.2d, v1.2d
; CHECK: ret
%res = add <2 x i64> %op1, %op2
ret <2 x i64> %res
}
define void @add_v4i64(<4 x i64>* %a, <4 x i64>* %b) #0 {
; CHECK-LABEL: @add_v4i64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),4)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <4 x i64>, <4 x i64>* %a
%op2 = load <4 x i64>, <4 x i64>* %b
%res = add <4 x i64> %op1, %op2
store <4 x i64> %res, <4 x i64>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v8i64(<8 x i64>* %a, <8 x i64>* %b) #0 {
; CHECK-LABEL: @add_v8i64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),8)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <8 x i64>, <8 x i64>* %a
%op2 = load <8 x i64>, <8 x i64>* %b
%res = add <8 x i64> %op1, %op2
store <8 x i64> %res, <8 x i64>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v16i64(<16 x i64>* %a, <16 x i64>* %b) #0 {
; CHECK-LABEL: @add_v16i64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),16)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <16 x i64>, <16 x i64>* %a
%op2 = load <16 x i64>, <16 x i64>* %b
%res = add <16 x i64> %op1, %op2
store <16 x i64> %res, <16 x i64>* %a
ret void
}
; NOTE: Check lines only cover the first VBYTES because the add_v#i8 tests
; already cover the general legalisation cases.
define void @add_v32i64(<32 x i64>* %a, <32 x i64>* %b) #0 {
; CHECK-LABEL: @add_v32i64
; CHECK: ptrue [[PG:p[0-9]+]].d, vl[[#min(div(VBYTES,8),32)]]
; CHECK-DAG: ld1d { [[OP1:z[0-9]+]].d }, [[PG]]/z, [x0]
; CHECK-DAG: ld1d { [[OP2:z[0-9]+]].d }, [[PG]]/z, [x1]
; CHECK: add [[RES:z[0-9]+]].d, [[PG]]/m, [[OP1]].d, [[OP2]].d
; CHECK: st1d { [[RES]].d }, [[PG]], [x0]
; CHECK: ret
%op1 = load <32 x i64>, <32 x i64>* %a
%op2 = load <32 x i64>, <32 x i64>* %b
%res = add <32 x i64> %op1, %op2
store <32 x i64> %res, <32 x i64>* %a
ret void
}
attributes #0 = { "target-features"="+sve" }