[SVE][Codegen] Lower legal min & max operations
Summary:
This patch adds AArch64ISD nodes for [S|U]MIN_PRED and [S|U]MAX_PRED, and lowers
both SVE intrinsics and IR operations for min and max to these nodes. There are
two forms of these instructions for SVE: a predicated form and an immediate
(unpredicated) form. The patterns which existed for the latter have been updated
to match a predicated node with an immediate and map this to the immediate
instruction.

Reviewers: sdesmalen, efriedma, dancgr, rengolin

Reviewed By: efriedma

Subscribers: huihuiz, tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D79087
parent e737847b8f
commit 19f5da9c1d
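For illustration, here is a minimal pair of IR examples distilled from the tests added below (the function names @smin_example and @smax_imm_example are hypothetical, not part of the patch). With this change, a min/max on scalable vectors written as icmp+select is expected to lower to the predicated SVE form, while an SVE min/max intrinsic whose operand is a splat of an in-range constant is expected to select the immediate (unpredicated) form:

define <vscale x 16 x i8> @smin_example(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
  ; expected codegen (per the tests below): ptrue p0.b ; smin z0.b, p0/m, z0.b, z1.b
  %cmp = icmp slt <vscale x 16 x i8> %a, %b
  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
  ret <vscale x 16 x i8> %min
}

define <vscale x 16 x i8> @smax_imm_example(<vscale x 16 x i8> %a) {
  ; expected codegen (per the tests below): smax z0.b, z0.b, #-128 (immediate form)
  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %splat)
  ret <vscale x 16 x i8> %out
}

declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)
declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)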
@@ -3092,7 +3092,7 @@ bool AArch64DAGToDAGISel::SelectSVESignedArithImm(SDValue N, SDValue &Imm) {
   if (auto CNode = dyn_cast<ConstantSDNode>(N)) {
     int64_t ImmVal = CNode->getSExtValue();
     SDLoc DL(N);
-    if (ImmVal >= -127 && ImmVal < 127) {
+    if (ImmVal >= -128 && ImmVal < 128) {
       Imm = CurDAG->getTargetConstant(ImmVal, DL, MVT::i32);
       return true;
     }
@@ -188,10 +188,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::UADDSAT, VT, Legal);
       setOperationAction(ISD::SSUBSAT, VT, Legal);
       setOperationAction(ISD::USUBSAT, VT, Legal);
-      setOperationAction(ISD::SMAX, VT, Legal);
-      setOperationAction(ISD::UMAX, VT, Legal);
-      setOperationAction(ISD::SMIN, VT, Legal);
-      setOperationAction(ISD::UMIN, VT, Legal);
     }

     for (auto VT :
@@ -887,6 +883,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
       setOperationAction(ISD::SDIV, VT, Custom);
       setOperationAction(ISD::UDIV, VT, Custom);
+      setOperationAction(ISD::SMIN, VT, Custom);
+      setOperationAction(ISD::UMIN, VT, Custom);
+      setOperationAction(ISD::SMAX, VT, Custom);
+      setOperationAction(ISD::UMAX, VT, Custom);
     }
   }
   setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
@@ -1285,6 +1285,10 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case AArch64ISD::TLSDESC_CALLSEQ: return "AArch64ISD::TLSDESC_CALLSEQ";
   case AArch64ISD::SDIV_PRED: return "AArch64ISD::SDIV_PRED";
   case AArch64ISD::UDIV_PRED: return "AArch64ISD::UDIV_PRED";
+  case AArch64ISD::SMIN_PRED: return "AArch64ISD::SMIN_PRED";
+  case AArch64ISD::UMIN_PRED: return "AArch64ISD::UMIN_PRED";
+  case AArch64ISD::SMAX_PRED: return "AArch64ISD::SMAX_PRED";
+  case AArch64ISD::UMAX_PRED: return "AArch64ISD::UMAX_PRED";
   case AArch64ISD::ADC: return "AArch64ISD::ADC";
   case AArch64ISD::SBC: return "AArch64ISD::SBC";
   case AArch64ISD::ADDS: return "AArch64ISD::ADDS";
@@ -3354,9 +3358,17 @@ SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
   case ISD::EXTRACT_SUBVECTOR:
     return LowerEXTRACT_SUBVECTOR(Op, DAG);
   case ISD::SDIV:
-    return LowerDIV(Op, DAG, AArch64ISD::SDIV_PRED);
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SDIV_PRED);
   case ISD::UDIV:
-    return LowerDIV(Op, DAG, AArch64ISD::UDIV_PRED);
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::UDIV_PRED);
+  case ISD::SMIN:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMIN_PRED);
+  case ISD::UMIN:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMIN_PRED);
+  case ISD::SMAX:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::SMAX_PRED);
+  case ISD::UMAX:
+    return LowerToPredicatedOp(Op, DAG, AArch64ISD::UMAX_PRED);
   case ISD::SRA:
   case ISD::SRL:
   case ISD::SHL:
@@ -7663,7 +7675,7 @@ SDValue AArch64TargetLowering::LowerDUPQLane(SDValue Op,
   return DAG.getNode(ISD::BITCAST, DL, VT, TBL);
 }

-SDValue AArch64TargetLowering::LowerDIV(SDValue Op,
+SDValue AArch64TargetLowering::LowerToPredicatedOp(SDValue Op,
                                                    SelectionDAG &DAG,
                                                    unsigned NewOp) const {
   EVT VT = Op.getValueType();
@@ -11435,7 +11447,19 @@ static SDValue performIntrinsicCombine(SDNode *N,
     return DAG.getNode(AArch64ISD::SDIV_PRED, SDLoc(N), N->getValueType(0),
                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
   case Intrinsic::aarch64_sve_udiv:
-    return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0),
+    return DAG.getNode(AArch64ISD::UDIV_PRED, SDLoc(N), N->getValueType(0),
                        N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_smin:
+    return DAG.getNode(AArch64ISD::SMIN_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_umin:
+    return DAG.getNode(AArch64ISD::UMIN_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_smax:
+    return DAG.getNode(AArch64ISD::SMAX_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
+  case Intrinsic::aarch64_sve_umax:
+    return DAG.getNode(AArch64ISD::UMAX_PRED, SDLoc(N), N->getValueType(0),
+                       N->getOperand(1), N->getOperand(2), N->getOperand(3));
   case Intrinsic::aarch64_sve_fadda:
     return combineSVEReductionOrderedFP(N, AArch64ISD::FADDA_PRED, DAG);
@@ -55,6 +55,10 @@ enum NodeType : unsigned {
   // Arithmetic instructions
   SDIV_PRED,
   UDIV_PRED,
+  SMIN_PRED,
+  UMIN_PRED,
+  SMAX_PRED,
+  UMAX_PRED,

   // Arithmetic instructions which write flags.
   ADDS,
@@ -793,8 +797,8 @@ private:
   SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerDUPQLane(SDValue Op, SelectionDAG &DAG) const;
-  SDValue LowerDIV(SDValue Op, SelectionDAG &DAG,
-                   unsigned NewOp) const;
+  SDValue LowerToPredicatedOp(SDValue Op, SelectionDAG &DAG,
+                              unsigned NewOp) const;
   SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerVectorSRA_SRL_SHL(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
@@ -149,13 +149,17 @@ def AArch64andv_pred : SDNode<"AArch64ISD::ANDV_PRED", SDT_AArch64Reduce>;
 def AArch64lasta : SDNode<"AArch64ISD::LASTA", SDT_AArch64Reduce>;
 def AArch64lastb : SDNode<"AArch64ISD::LASTB", SDT_AArch64Reduce>;

-def SDT_AArch64DIV : SDTypeProfile<1, 3, [
+def SDT_AArch64Arith : SDTypeProfile<1, 3, [
   SDTCisVec<0>, SDTCisVec<1>, SDTCisVec<2>, SDTCisVec<3>,
   SDTCVecEltisVT<1,i1>, SDTCisSameAs<2,3>
 ]>;

-def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64DIV>;
-def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64DIV>;
+def AArch64sdiv_pred : SDNode<"AArch64ISD::SDIV_PRED", SDT_AArch64Arith>;
+def AArch64udiv_pred : SDNode<"AArch64ISD::UDIV_PRED", SDT_AArch64Arith>;
+def AArch64smin_pred : SDNode<"AArch64ISD::SMIN_PRED", SDT_AArch64Arith>;
+def AArch64umin_pred : SDNode<"AArch64ISD::UMIN_PRED", SDT_AArch64Arith>;
+def AArch64smax_pred : SDNode<"AArch64ISD::SMAX_PRED", SDT_AArch64Arith>;
+def AArch64umax_pred : SDNode<"AArch64ISD::UMAX_PRED", SDT_AArch64Arith>;

 def SDT_AArch64ReduceWithInit : SDTypeProfile<1, 3, [SDTCisVec<1>, SDTCisVec<3>]>;
 def AArch64clasta_n : SDNode<"AArch64ISD::CLASTA_N", SDT_AArch64ReduceWithInit>;
@@ -232,10 +236,10 @@ let Predicates = [HasSVE] in {
   defm EOR_ZI : sve_int_log_imm<0b01, "eor", "eon", xor>;
   defm AND_ZI : sve_int_log_imm<0b10, "and", "bic", and>;

-  defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", smax>;
-  defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", smin>;
-  defm UMAX_ZI : sve_int_arith_imm1_unsigned<0b01, "umax", umax>;
-  defm UMIN_ZI : sve_int_arith_imm1_unsigned<0b11, "umin", umin>;
+  defm SMAX_ZI : sve_int_arith_imm1<0b00, "smax", AArch64smax_pred>;
+  defm SMIN_ZI : sve_int_arith_imm1<0b10, "smin", AArch64smin_pred>;
+  defm UMAX_ZI : sve_int_arith_imm1_unsigned<0b01, "umax", AArch64umax_pred>;
+  defm UMIN_ZI : sve_int_arith_imm1_unsigned<0b11, "umin", AArch64umin_pred>;

   defm MUL_ZI : sve_int_arith_imm2<"mul", mul>;
   defm MUL_ZPmZ : sve_int_bin_pred_arit_2<0b000, "mul", int_aarch64_sve_mul>;
@@ -280,10 +284,10 @@ let Predicates = [HasSVE] in {
   defm FABS_ZPmZ : sve_int_un_pred_arit_1_fp<0b100, "fabs", int_aarch64_sve_fabs>;
   defm FNEG_ZPmZ : sve_int_un_pred_arit_1_fp<0b101, "fneg", int_aarch64_sve_fneg>;

-  defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax", int_aarch64_sve_smax>;
-  defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax", int_aarch64_sve_umax>;
-  defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin", int_aarch64_sve_smin>;
-  defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin", int_aarch64_sve_umin>;
+  defm SMAX_ZPmZ : sve_int_bin_pred_arit_1<0b000, "smax", AArch64smax_pred>;
+  defm UMAX_ZPmZ : sve_int_bin_pred_arit_1<0b001, "umax", AArch64umax_pred>;
+  defm SMIN_ZPmZ : sve_int_bin_pred_arit_1<0b010, "smin", AArch64smin_pred>;
+  defm UMIN_ZPmZ : sve_int_bin_pred_arit_1<0b011, "umin", AArch64umin_pred>;
   defm SABD_ZPmZ : sve_int_bin_pred_arit_1<0b100, "sabd", int_aarch64_sve_sabd>;
   defm UABD_ZPmZ : sve_int_bin_pred_arit_1<0b101, "uabd", int_aarch64_sve_uabd>;
@@ -324,6 +324,11 @@ class SVE_1_Op_Imm_Arith_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
   : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
         (inst $Op1, i32:$imm)>;

+class SVE_1_Op_Imm_Arith_Pred_Pat<ValueType vt, ValueType pt, SDPatternOperator op,
+                                  ZPRRegOp zprty, ValueType it, ComplexPattern cpx, Instruction inst>
+  : Pat<(vt (op (pt (AArch64ptrue 31)), (vt zprty:$Op1), (vt (AArch64dup (it (cpx i32:$imm)))))),
+        (inst $Op1, i32:$imm)>;
+
 class SVE_1_Op_Imm_Log_Pat<ValueType vt, SDPatternOperator op, ZPRRegOp zprty,
                            ValueType it, ComplexPattern cpx, Instruction inst>
   : Pat<(vt (op (vt zprty:$Op1), (vt (AArch64dup (it (cpx i64:$imm)))))),
@@ -3840,10 +3845,10 @@ multiclass sve_int_arith_imm1<bits<2> opc, string asm, SDPatternOperator op> {
   def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, simm8>;
   def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, simm8>;

-  def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _B)>;
-  def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _H)>;
-  def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _S)>;
-  def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithSImmPat, !cast<Instruction>(NAME # _D)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv16i8, nxv16i1, op, ZPR8, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv8i16, nxv8i1, op, ZPR16, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv4i32, nxv4i1, op, ZPR32, i32, SVEArithSImmPat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv2i64, nxv2i1, op, ZPR64, i64, SVEArithSImmPat, !cast<Instruction>(NAME # _D)>;
 }

 multiclass sve_int_arith_imm1_unsigned<bits<2> opc, string asm, SDPatternOperator op> {
@@ -3852,10 +3857,10 @@ multiclass sve_int_arith_imm1_unsigned<bits<2> opc, string asm, SDPatternOperato
   def _S : sve_int_arith_imm<0b10, { 0b1010, opc }, asm, ZPR32, imm0_255>;
   def _D : sve_int_arith_imm<0b11, { 0b1010, opc }, asm, ZPR64, imm0_255>;

-  def : SVE_1_Op_Imm_Arith_Pat<nxv16i8, op, ZPR8, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _B)>;
-  def : SVE_1_Op_Imm_Arith_Pat<nxv8i16, op, ZPR16, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _H)>;
-  def : SVE_1_Op_Imm_Arith_Pat<nxv4i32, op, ZPR32, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _S)>;
-  def : SVE_1_Op_Imm_Arith_Pat<nxv2i64, op, ZPR64, i64, SVEArithUImmPat, !cast<Instruction>(NAME # _D)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv16i8, nxv16i1, op, ZPR8, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _B)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv8i16, nxv8i1, op, ZPR16, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _H)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv4i32, nxv4i1, op, ZPR32, i32, SVEArithUImmPat, !cast<Instruction>(NAME # _S)>;
+  def : SVE_1_Op_Imm_Arith_Pred_Pat<nxv2i64, nxv2i1, op, ZPR64, i64, SVEArithUImmPat, !cast<Instruction>(NAME # _D)>;
 }

 multiclass sve_int_arith_imm2<string asm, SDPatternOperator op> {
@@ -43,3 +43,179 @@ define <vscale x 2 x i64> @udiv_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b
   %div = udiv <vscale x 2 x i64> %a, %b
   ret <vscale x 2 x i64> %div
 }
+
+;
+; SMIN
+;
+
+define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @smin_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: smin z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @smin_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: smin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smin_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: smin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: smin_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: smin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp slt <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
+
+;
+; UMIN
+;
+
+define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @umin_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: umin z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @umin_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: umin z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umin_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: umin z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: umin_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: umin z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp ult <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
+
+;
+; SMAX
+;
+
+define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @smax_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: smax z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @smax_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: smax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: smax_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: smax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: smax_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: smax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp sgt <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
+
+;
+; UMAX
+;
+
+define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) {
+; CHECK-LABEL: @umax_i8
+; CHECK-DAG: ptrue p0.b
+; CHECK-DAG: umax z0.b, p0/m, z0.b, z1.b
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 16 x i8> %a, %b
+  %min = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b
+  ret <vscale x 16 x i8> %min
+}
+
+define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) {
+; CHECK-LABEL: @umax_i16
+; CHECK-DAG: ptrue p0.h
+; CHECK-DAG: umax z0.h, p0/m, z0.h, z1.h
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 8 x i16> %a, %b
+  %min = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b
+  ret <vscale x 8 x i16> %min
+}
+
+define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) {
+; CHECK-LABEL: umax_i32:
+; CHECK-DAG: ptrue p0.s
+; CHECK-DAG: umax z0.s, p0/m, z0.s, z1.s
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 4 x i32> %a, %b
+  %min = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b
+  ret <vscale x 4 x i32> %min
+}
+
+define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) {
+; CHECK-LABEL: umax_i64:
+; CHECK-DAG: ptrue p0.d
+; CHECK-DAG: umax z0.d, p0/m, z0.d, z1.d
+; CHECK-NEXT: ret
+  %cmp = icmp ugt <vscale x 2 x i64> %a, %b
+  %min = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b
+  ret <vscale x 2 x i64> %min
+}
@@ -1,5 +1,221 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

+; SMAX
+
+define <vscale x 16 x i8> @smax_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smax_i8:
+; CHECK: smax z0.b, z0.b, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 -128, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @smax_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smax_i16:
+; CHECK: smax z0.h, z0.h, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 127, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @smax_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smax_i32:
+; CHECK: smax z0.s, z0.s, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 -128, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @smax_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smax_i64:
+; CHECK: smax z0.d, z0.d, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 127, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; SMIN
+
+define <vscale x 16 x i8> @smin_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: smin_i8:
+; CHECK: smin z0.b, z0.b, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 127, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @smin_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: smin_i16:
+; CHECK: smin z0.h, z0.h, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 -128, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @smin_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: smin_i32:
+; CHECK: smin z0.s, z0.s, #127
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 127, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @smin_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: smin_i64:
+; CHECK: smin z0.d, z0.d, #-128
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 -128, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UMAX
+
+define <vscale x 16 x i8> @umax_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umax_i8:
+; CHECK: umax z0.b, z0.b, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @umax_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umax_i16:
+; CHECK: umax z0.h, z0.h, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 255, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @umax_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umax_i32:
+; CHECK: umax z0.s, z0.s, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 0, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @umax_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umax_i64:
+; CHECK: umax z0.d, z0.d, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 255, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
+; UMIN
+
+define <vscale x 16 x i8> @umin_i8(<vscale x 16 x i8> %a) {
+; CHECK-LABEL: umin_i8:
+; CHECK: umin z0.b, z0.b, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
+  %elt = insertelement <vscale x 16 x i8> undef, i8 255, i32 0
+  %splat = shufflevector <vscale x 16 x i8> %elt, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
+  %out = call <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1> %pg,
+                                                                <vscale x 16 x i8> %a,
+                                                                <vscale x 16 x i8> %splat)
+  ret <vscale x 16 x i8> %out
+}
+
+define <vscale x 8 x i16> @umin_i16(<vscale x 8 x i16> %a) {
+; CHECK-LABEL: umin_i16:
+; CHECK: umin z0.h, z0.h, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
+  %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
+  %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
+  %out = call <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1> %pg,
+                                                                <vscale x 8 x i16> %a,
+                                                                <vscale x 8 x i16> %splat)
+  ret <vscale x 8 x i16> %out
+}
+
+define <vscale x 4 x i32> @umin_i32(<vscale x 4 x i32> %a) {
+; CHECK-LABEL: umin_i32:
+; CHECK: umin z0.s, z0.s, #255
+; CHECK-NEXT: ret
+  %pg = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
+  %elt = insertelement <vscale x 4 x i32> undef, i32 255, i32 0
+  %splat = shufflevector <vscale x 4 x i32> %elt, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
+  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                <vscale x 4 x i32> %a,
+                                                                <vscale x 4 x i32> %splat)
+  ret <vscale x 4 x i32> %out
+}
+
+define <vscale x 2 x i64> @umin_i64(<vscale x 2 x i64> %a) {
+; CHECK-LABEL: umin_i64:
+; CHECK: umin z0.d, z0.d, #0
+; CHECK-NEXT: ret
+  %pg = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
+  %elt = insertelement <vscale x 2 x i64> undef, i64 0, i64 0
+  %splat = shufflevector <vscale x 2 x i64> %elt, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
+  %out = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                <vscale x 2 x i64> %a,
+                                                                <vscale x 2 x i64> %splat)
+  ret <vscale x 2 x i64> %out
+}
+
 ; SQADD

 define <vscale x 16 x i8> @sqadd_b_lowimm(<vscale x 16 x i8> %a) {
@@ -336,3 +552,28 @@ declare <vscale x 16 x i8> @llvm.aarch64.sve.uqsub.x.nxv16i8(<vscale x 16 x i8>,
 declare <vscale x 8 x i16> @llvm.aarch64.sve.uqsub.x.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.uqsub.x.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.uqsub.x.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.smax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.smin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.smin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.smin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.smin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.umax.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umax.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umax.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umax.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.umin.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.umin.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.umin.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)
+
+declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 %pattern)
+declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 %pattern)
+declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 %pattern)
+declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 %pattern)