[AArch64][SVE] Add SVE intrinsic for LD1RQ
Summary:
Adds the following intrinsic for contiguous load & replicate:
  - @llvm.aarch64.sve.ld1rq

The LD1RQ intrinsic only needs the SImmS16XForm added by this patch.
The others (SImmS2XForm, SImmS3XForm & SImmS4XForm) were added for
consistency.

Reviewers: andwar, sdesmalen, efriedma, cameron.mcinally, dancgr, rengolin

Reviewed By: sdesmalen

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, danielkiss, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76929
parent b198f1f86c
commit 17f6e18acf
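For context (not part of the patch): a minimal C++ usage sketch of what the new intrinsic does at the source level, assuming the ACLE svld1rq_* builtins from arm_sve.h are lowered to @llvm.aarch64.sve.ld1rq.* (the build line below is also an assumption):

// Sketch only; assumes clang lowers svld1rq_f32 to @llvm.aarch64.sve.ld1rq.nxv4f32.
// Build (assumption): clang -O2 --target=aarch64-linux-gnu -march=armv8-a+sve -S
#include <arm_sve.h>

// Loads one 128-bit quadword of floats from base, under predicate pg, and
// replicates it across every 128-bit granule of the scalable vector.
svfloat32_t broadcast_quad(svbool_t pg, const float *base) {
  return svld1rq_f32(pg, base);
}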
@@ -1307,6 +1307,8 @@ def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 
+def int_aarch64_sve_ld1rq : AdvSIMD_1Vec_PredLoad_Intrinsic;
+
 //
 // Stores
 //
@@ -1424,6 +1424,7 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case AArch64ISD::LDNF1S: return "AArch64ISD::LDNF1S";
   case AArch64ISD::LDFF1: return "AArch64ISD::LDFF1";
   case AArch64ISD::LDFF1S: return "AArch64ISD::LDFF1S";
+  case AArch64ISD::LD1RQ: return "AArch64ISD::LD1RQ";
   case AArch64ISD::GLD1: return "AArch64ISD::GLD1";
   case AArch64ISD::GLD1_SCALED: return "AArch64ISD::GLD1_SCALED";
   case AArch64ISD::GLD1_SXTW: return "AArch64ISD::GLD1_SXTW";
@@ -11622,6 +11623,24 @@ static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
   return L;
 }
 
+static SDValue performLD1RQCombine(SDNode *N, SelectionDAG &DAG) {
+  SDLoc DL(N);
+  EVT VT = N->getValueType(0);
+
+  EVT LoadVT = VT;
+  if (VT.isFloatingPoint())
+    LoadVT = VT.changeTypeToInteger();
+
+  SDValue Ops[] = {N->getOperand(0), N->getOperand(2), N->getOperand(3)};
+  SDValue Load = DAG.getNode(AArch64ISD::LD1RQ, DL, {LoadVT, MVT::Other}, Ops);
+  SDValue LoadChain = SDValue(Load.getNode(), 1);
+
+  if (VT.isFloatingPoint())
+    Load = DAG.getNode(ISD::BITCAST, DL, VT, Load.getValue(0));
+
+  return DAG.getMergeValues({ Load, LoadChain }, DL);
+}
+
 static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   SDValue Data = N->getOperand(2);
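A note for orientation (not part of the patch): performLD1RQCombine fires on an ISD::INTRINSIC_W_CHAIN node, which is why it forwards operands 0, 2 and 3 and skips operand 1. A commented sketch of the standard SelectionDAG operand layout it relies on:

// Operand layout of the ISD::INTRINSIC_W_CHAIN node reaching the combine
// (standard SelectionDAG convention; shown here as a gloss on the code above):
//   N->getOperand(0)  // the incoming chain
//   N->getOperand(1)  // the intrinsic ID (Intrinsic::aarch64_sve_ld1rq)
//   N->getOperand(2)  // the governing predicate
//   N->getOperand(3)  // the base pointer
// For floating-point result types the load is emitted with the equivalent
// integer type and bitcast back, since the LD1RQ patterns below are defined
// on integer vector types only.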
@@ -13211,6 +13230,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
       return performNEONPostLDSTCombine(N, DCI, DAG);
    case Intrinsic::aarch64_sve_ldnt1:
      return performLDNT1Combine(N, DAG);
+   case Intrinsic::aarch64_sve_ld1rq:
+     return performLD1RQCombine(N, DAG);
    case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
      return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1);
    case Intrinsic::aarch64_sve_ldnt1_gather:
@@ -226,6 +226,7 @@ enum NodeType : unsigned {
   LDNF1S,
   LDFF1,
   LDFF1S,
+  LD1RQ,
 
   // Unsigned gather loads.
   GLD1,
@@ -483,6 +483,19 @@ def uimm6s16 : Operand<i64>, ImmLeaf<i64,
   let ParserMatchClass = UImm6s16Operand;
 }
 
+def SImmS2XForm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() / 2, SDLoc(N), MVT::i64);
+}]>;
+def SImmS3XForm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() / 3, SDLoc(N), MVT::i64);
+}]>;
+def SImmS4XForm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() / 4, SDLoc(N), MVT::i64);
+}]>;
+def SImmS16XForm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() / 16, SDLoc(N), MVT::i64);
+}]>;
+
 // simm6sN predicate - True if the immediate is a multiple of N in the range
 // [-32 * N, 31 * N].
 def SImm6s1Operand : SImmScaledMemoryIndexed<6, 1>;
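Not part of the patch: these SDNodeXForms turn a byte offset that the matching ImmLeaf has already validated into the scaled immediate the instruction encodes (the next hunk attaches them to the simm4sN operands). A hypothetical stand-alone C++ equivalent of SImmS16XForm:

#include <cstdint>

// Hypothetical equivalent of SImmS16XForm, for illustration only: the DAG
// carries a byte offset, while LD1RQ's immediate is encoded in units of
// 16 bytes. simm4s16 has already guaranteed the value is a multiple of 16
// in [-128, 112], so e.g. +112 becomes 7 and -128 becomes -8.
int64_t scaleLd1rqOffset(int64_t byteOffset) {
  return byteOffset / 16;
}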
@@ -506,27 +519,27 @@ def simm4s1 : Operand<i64>, ImmLeaf<i64,
 }
 
 def simm4s2 : Operand<i64>, ImmLeaf<i64,
-[{ return Imm >=-16 && Imm <= 14 && (Imm % 2) == 0x0; }]> {
+[{ return Imm >=-16 && Imm <= 14 && (Imm % 2) == 0x0; }], SImmS2XForm> {
   let PrintMethod = "printImmScale<2>";
   let ParserMatchClass = SImm4s2Operand;
   let DecoderMethod = "DecodeSImm<4>";
 }
 
 def simm4s3 : Operand<i64>, ImmLeaf<i64,
-[{ return Imm >=-24 && Imm <= 21 && (Imm % 3) == 0x0; }]> {
+[{ return Imm >=-24 && Imm <= 21 && (Imm % 3) == 0x0; }], SImmS3XForm> {
   let PrintMethod = "printImmScale<3>";
   let ParserMatchClass = SImm4s3Operand;
   let DecoderMethod = "DecodeSImm<4>";
 }
 
 def simm4s4 : Operand<i64>, ImmLeaf<i64,
-[{ return Imm >=-32 && Imm <= 28 && (Imm % 4) == 0x0; }]> {
+[{ return Imm >=-32 && Imm <= 28 && (Imm % 4) == 0x0; }], SImmS4XForm> {
   let PrintMethod = "printImmScale<4>";
   let ParserMatchClass = SImm4s4Operand;
   let DecoderMethod = "DecodeSImm<4>";
 }
 def simm4s16 : Operand<i64>, ImmLeaf<i64,
-[{ return Imm >=-128 && Imm <= 112 && (Imm % 16) == 0x0; }]> {
+[{ return Imm >=-128 && Imm <= 112 && (Imm % 16) == 0x0; }], SImmS16XForm> {
   let PrintMethod = "printImmScale<16>";
   let ParserMatchClass = SImm4s16Operand;
   let DecoderMethod = "DecodeSImm<4>";
@@ -31,6 +31,16 @@ def AArch64ldff1 : SDNode<"AArch64ISD::LDFF1", SDT_AArch64_LD1, [SDNPHasChain, S
 def AArch64ldnf1s : SDNode<"AArch64ISD::LDNF1S", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>;
 def AArch64ldff1s : SDNode<"AArch64ISD::LDFF1S", SDT_AArch64_LD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue, SDNPOutGlue]>;
 
+// Contiguous load and replicate - node definitions
+//
+
+def SDT_AArch64_LD1RQ : SDTypeProfile<1, 2, [
+  SDTCisVec<0>, SDTCisVec<1>, SDTCisPtrTy<2>,
+  SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
+]>;
+
+def AArch64ld1rq : SDNode<"AArch64ISD::LD1RQ", SDT_AArch64_LD1RQ, [SDNPHasChain, SDNPMayLoad]>;
+
 // Gather loads - node definitions
 //
 def SDT_AArch64_GATHER_SV : SDTypeProfile<1, 4, [
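A gloss on the new type profile (not part of the patch): SDTypeProfile<1, 2, ...> declares one result and two operands, and the constraint indices count results first, then operands. Read constraint by constraint:

// SDT_AArch64_LD1RQ, annotated (a sketch; indices per SDTypeProfile convention):
//   SDTCisVec<0>              // index 0 = the result, a vector
//   SDTCisVec<1>              // index 1 = first operand, the governing predicate
//   SDTCisPtrTy<2>            // index 2 = second operand, the base pointer
//   SDTCVecEltisVT<1,i1>      // predicate elements are i1
//   SDTCisSameNumEltsAs<0,1>  // predicate and result have matching lane counts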
@@ -1317,6 +1327,25 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
   def : Pat<(AArch64ptest (nxv2i1 PPR:$pg), (nxv2i1 PPR:$src)),
             (PTEST_PP PPR:$pg, PPR:$src)>;
 
+  // LD1R of 128-bit masked data
+  def : Pat<(nxv16i8 (AArch64ld1rq PPR:$gp, GPR64:$base)),
+            (LD1RQ_B_IMM $gp, $base, (i64 0))>;
+  def : Pat<(nxv8i16 (AArch64ld1rq PPR:$gp, GPR64:$base)),
+            (LD1RQ_H_IMM $gp, $base, (i64 0))>;
+  def : Pat<(nxv4i32 (AArch64ld1rq PPR:$gp, GPR64:$base)),
+            (LD1RQ_W_IMM $gp, $base, (i64 0))>;
+  def : Pat<(nxv2i64 (AArch64ld1rq PPR:$gp, GPR64:$base)),
+            (LD1RQ_D_IMM $gp, $base, (i64 0))>;
+
+  def : Pat<(nxv16i8 (AArch64ld1rq PPR:$gp, (add GPR64:$base, (i64 simm4s16:$imm)))),
+            (LD1RQ_B_IMM $gp, $base, simm4s16:$imm)>;
+  def : Pat<(nxv8i16 (AArch64ld1rq PPR:$gp, (add GPR64:$base, (i64 simm4s16:$imm)))),
+            (LD1RQ_H_IMM $gp, $base, simm4s16:$imm)>;
+  def : Pat<(nxv4i32 (AArch64ld1rq PPR:$gp, (add GPR64:$base, (i64 simm4s16:$imm)))),
+            (LD1RQ_W_IMM $gp, $base, simm4s16:$imm)>;
+  def : Pat<(nxv2i64 (AArch64ld1rq PPR:$gp, (add GPR64:$base, (i64 simm4s16:$imm)))),
+            (LD1RQ_D_IMM $gp, $base, simm4s16:$imm)>;
+
   def : Pat<(sext_inreg (nxv2i64 ZPR:$Zs), nxv2i32), (SXTW_ZPmZ_D (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
   def : Pat<(sext_inreg (nxv2i64 ZPR:$Zs), nxv2i16), (SXTH_ZPmZ_D (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
   def : Pat<(sext_inreg (nxv2i64 ZPR:$Zs), nxv2i8), (SXTB_ZPmZ_D (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
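Not part of the patch: the second group of patterns folds an explicit add of base and offset into the reg+imm form; offsets that fail the simm4s16 predicate keep the add and fall back to the zero-offset patterns, as the out-of-bound tests below demonstrate. A sketch of the legality rule in plain C++:

#include <cstdint>

// When can base + byteOffset use LD1RQ_*_IMM's scaled immediate?
// Per simm4s16 above: a multiple of 16 within [-128, 112], i.e. an
// encoded immediate in [-8, 7] after SImmS16XForm divides by 16.
bool foldsIntoLd1rqImm(int64_t byteOffset) {
  return byteOffset % 16 == 0 && byteOffset >= -128 && byteOffset <= 112;
}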
@@ -1,5 +1,178 @@
 ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
 
+;
+; LD1RQB
+;
+
+define <vscale x 16 x i8> @ld1rqb_i8(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1rqb_i8:
+; CHECK: ld1rqb { z0.b }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %addr)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @ld1rqb_i8_imm(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1rqb_i8_imm:
+; CHECK: ld1rqb { z0.b }, p0/z, [x0, #16]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i8, i8* %addr, i8 16
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @ld1rqb_i8_imm_lower_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1rqb_i8_imm_lower_bound:
+; CHECK: ld1rqb { z0.b }, p0/z, [x0, #-128]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i8, i8* %addr, i8 -128
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @ld1rqb_i8_imm_upper_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1rqb_i8_imm_upper_bound:
+; CHECK: ld1rqb { z0.b }, p0/z, [x0, #112]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i8, i8* %addr, i8 112
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_lower_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1rqb_i8_imm_out_of_lower_bound:
+; CHECK: sub x8, x0, #129
+; CHECK-NEXT: ld1rqb { z0.b }, p0/z, [x8]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i8, i8* %addr, i64 -129
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  ret <vscale x 16 x i8> %res
+}
+
+define <vscale x 16 x i8> @ld1rqb_i8_imm_out_of_upper_bound(<vscale x 16 x i1> %pred, i8* %addr) {
+; CHECK-LABEL: ld1rqb_i8_imm_out_of_upper_bound:
+; CHECK: add x8, x0, #113
+; CHECK-NEXT: ld1rqb { z0.b }, p0/z, [x8]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i8, i8* %addr, i64 113
+  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1> %pred, i8* %ptr)
+  ret <vscale x 16 x i8> %res
+}
+
+;
+; LD1RQH
+;
+
+define <vscale x 8 x i16> @ld1rqh_i16(<vscale x 8 x i1> %pred, i16* %addr) {
+; CHECK-LABEL: ld1rqh_i16:
+; CHECK: ld1rqh { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, i16* %addr)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x half> @ld1rqh_f16(<vscale x 8 x i1> %pred, half* %addr) {
+; CHECK-LABEL: ld1rqh_f16:
+; CHECK: ld1rqh { z0.h }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, half* %addr)
+  ret <vscale x 8 x half> %res
+}
+
+define <vscale x 8 x i16> @ld1rqh_i16_imm(<vscale x 8 x i1> %pred, i16* %addr) {
+; CHECK-LABEL: ld1rqh_i16_imm:
+; CHECK: ld1rqh { z0.h }, p0/z, [x0, #-64]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i16, i16* %addr, i16 -32
+  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1> %pred, i16* %ptr)
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x half> @ld1rqh_f16_imm(<vscale x 8 x i1> %pred, half* %addr) {
+; CHECK-LABEL: ld1rqh_f16_imm:
+; CHECK: ld1rqh { z0.h }, p0/z, [x0, #-16]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds half, half* %addr, i16 -8
+  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1> %pred, half* %ptr)
+  ret <vscale x 8 x half> %res
+}
+
+;
+; LD1RQW
+;
+
+define <vscale x 4 x i32> @ld1rqw_i32(<vscale x 4 x i1> %pred, i32* %addr) {
+; CHECK-LABEL: ld1rqw_i32:
+; CHECK: ld1rqw { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, i32* %addr)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x float> @ld1rqw_f32(<vscale x 4 x i1> %pred, float* %addr) {
+; CHECK-LABEL: ld1rqw_f32:
+; CHECK: ld1rqw { z0.s }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, float* %addr)
+  ret <vscale x 4 x float> %res
+}
+
+define <vscale x 4 x i32> @ld1rqw_i32_imm(<vscale x 4 x i1> %pred, i32* %addr) {
+; CHECK-LABEL: ld1rqw_i32_imm:
+; CHECK: ld1rqw { z0.s }, p0/z, [x0, #112]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i32, i32* %addr, i32 28
+  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1> %pred, i32* %ptr)
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x float> @ld1rqw_f32_imm(<vscale x 4 x i1> %pred, float* %addr) {
+; CHECK-LABEL: ld1rqw_f32_imm:
+; CHECK: ld1rqw { z0.s }, p0/z, [x0, #32]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds float, float* %addr, i32 8
+  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1> %pred, float* %ptr)
+  ret <vscale x 4 x float> %res
+}
+
+;
+; LD1RQD
+;
+
+define <vscale x 2 x i64> @ld1rqd_i64(<vscale x 2 x i1> %pred, i64* %addr) {
+; CHECK-LABEL: ld1rqd_i64:
+; CHECK: ld1rqd { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, i64* %addr)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x double> @ld1rqd_f64(<vscale x 2 x i1> %pred, double* %addr) {
+; CHECK-LABEL: ld1rqd_f64:
+; CHECK: ld1rqd { z0.d }, p0/z, [x0]
+; CHECK-NEXT: ret
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, double* %addr)
+  ret <vscale x 2 x double> %res
+}
+
+define <vscale x 2 x i64> @ld1rqd_i64_imm(<vscale x 2 x i1> %pred, i64* %addr) {
+; CHECK-LABEL: ld1rqd_i64_imm:
+; CHECK: ld1rqd { z0.d }, p0/z, [x0, #64]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds i64, i64* %addr, i64 8
+  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1> %pred, i64* %ptr)
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x double> @ld1rqd_f64_imm(<vscale x 2 x i1> %pred, double* %addr) {
+; CHECK-LABEL: ld1rqd_f64_imm:
+; CHECK: ld1rqd { z0.d }, p0/z, [x0, #-128]
+; CHECK-NEXT: ret
+  %ptr = getelementptr inbounds double, double* %addr, i64 -16
+  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1> %pred, double* %ptr)
+  ret <vscale x 2 x double> %res
+}
+
 ;
 ; LDNT1B
 ;
@@ -79,6 +252,14 @@ define <vscale x 2 x double> @ldnt1d_f64(<vscale x 2 x i1> %pred, double* %addr)
   ret <vscale x 2 x double> %res
 }
 
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1rq.nxv16i8(<vscale x 16 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1rq.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1rq.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1rq.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1rq.nxv8f16(<vscale x 8 x i1>, half*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1rq.nxv4f32(<vscale x 4 x i1>, float*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1rq.nxv2f64(<vscale x 2 x i1>, double*)
+
 declare <vscale x 16 x i8> @llvm.aarch64.sve.ldnt1.nxv16i8(<vscale x 16 x i1>, i8*)
 declare <vscale x 8 x i16> @llvm.aarch64.sve.ldnt1.nxv8i16(<vscale x 8 x i1>, i16*)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.nxv4i32(<vscale x 4 x i1>, i32*)