[AArch64][SVE] Add intrinsics for gather loads with 32-bit offsets
This patch adds intrinsics for SVE gather loads for which the offsets are
32 bits wide and are:

* unscaled
  * @llvm.aarch64.sve.ld1.gather.sxtw
  * @llvm.aarch64.sve.ld1.gather.uxtw
* scaled (offsets become indices)
  * @llvm.aarch64.sve.ld1.gather.sxtw.index
  * @llvm.aarch64.sve.ld1.gather.uxtw.index

The offsets are either zero (uxtw) or sign (sxtw) extended to 64 bits.

These intrinsics map 1-1 to the corresponding SVE instructions (examples for
half-words):

* unscaled
  * ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
  * ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
* scaled
  * ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
  * ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]

Committed on behalf of Andrzej Warzynski (andwar)

Reviewers: sdesmalen, kmclaughlin, eli.friedman, rengolin, rovka, huntergr, dancgr, mgudim, efriedma

Reviewed By: sdesmalen

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70782
This commit is contained in:
parent 8dd17a13b0
commit 8bf31e28d7
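As a quick illustration of the unscaled form, here is a minimal IR sketch (the function name is hypothetical; the intrinsic signature matches the tests added below):

; Gather i32 elements from %base plus sign-extended 32-bit offsets.
; Expected to select to: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)

define <vscale x 4 x i32> @example_gather_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets) {
  %vals = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %offsets)
  ret <vscale x 4 x i32> %vals
}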
llvm/include/llvm/IR/IntrinsicsAArch64.td
@@ -970,6 +970,14 @@ class AdvSIMD_GatherLoad_64bitOffset_Intrinsic
 // to reuse currently identical class definitions.
 class AdvSIMD_SVE_LOGB_Intrinsic : AdvSIMD_SVE_CNT_Intrinsic;
 
+class AdvSIMD_GatherLoad_32bitOffset_Intrinsic
+    : Intrinsic<[ llvm_anyvector_ty ],
+                [
+                  LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                  LLVMPointerToElt<0>, llvm_anyvector_ty
+                ],
+                [ IntrReadMem, IntrArgMemOnly ]>;
+
 // This class of intrinsics are not intended to be useful within LLVM IR but
 // are instead here to support some of the more rigid parts of the ACLE.
 class Builtin_SVCVT<string name, LLVMType OUT, LLVMType IN>
@@ -1211,6 +1219,16 @@ def int_aarch64_sve_ld1_gather : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;
 // scalar + vector, 64 bit scaled offsets
 def int_aarch64_sve_ld1_gather_index : AdvSIMD_GatherLoad_64bitOffset_Intrinsic;
 
+// scalar + vector, 32 bit unscaled offsets, sign (sxtw) or zero (uxtw)
+// extended to 64 bits
+def int_aarch64_sve_ld1_gather_sxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
+def int_aarch64_sve_ld1_gather_uxtw : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
+
+// scalar + vector, 32 bit scaled offsets, sign (sxtw) or zero (uxtw) extended
+// to 64 bits
+def int_aarch64_sve_ld1_gather_sxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
+def int_aarch64_sve_ld1_gather_uxtw_index : AdvSIMD_GatherLoad_32bitOffset_Intrinsic;
+
 //
 // SVE2 - Non-widening pairwise arithmetic
 //
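For reference, the two llvm_anyvector_ty slots above are resolved per call site: the first becomes the result vector type and the second the offset vector type, and both appear in the mangled intrinsic name. Two representative declarations, as they appear in the tests added below:

declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)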
llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1338,6 +1338,10 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case AArch64ISD::INSR: return "AArch64ISD::INSR";
   case AArch64ISD::GLD1: return "AArch64ISD::GLD1";
   case AArch64ISD::GLD1_SCALED: return "AArch64ISD::GLD1_SCALED";
+  case AArch64ISD::GLD1_SXTW: return "AArch64ISD::GLD1_SXTW";
+  case AArch64ISD::GLD1_UXTW: return "AArch64ISD::GLD1_UXTW";
+  case AArch64ISD::GLD1_SXTW_SCALED: return "AArch64ISD::GLD1_SXTW_SCALED";
+  case AArch64ISD::GLD1_UXTW_SCALED: return "AArch64ISD::GLD1_UXTW_SCALED";
   }
   return nullptr;
 }
@@ -11931,6 +11935,14 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
     return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1);
   case Intrinsic::aarch64_sve_ld1_gather_index:
     return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_SCALED);
+  case Intrinsic::aarch64_sve_ld1_gather_sxtw:
+    return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_SXTW);
+  case Intrinsic::aarch64_sve_ld1_gather_uxtw:
+    return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_UXTW);
+  case Intrinsic::aarch64_sve_ld1_gather_sxtw_index:
+    return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_SXTW_SCALED);
+  case Intrinsic::aarch64_sve_ld1_gather_uxtw_index:
+    return performLD1GatherCombine(N, DAG, AArch64ISD::GLD1_UXTW_SCALED);
   default:
     break;
   }
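To make the scaled ("index") addressing concrete: each 32-bit index is sign- or zero-extended to 64 bits and then scaled by the element size, so in the sketch below lane i loads an i16 from %base + 2 * sext(%indices[i]). A minimal illustration only; the function name is hypothetical, while the intrinsic declaration matches the tests added below:

; Expected to select to: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)

define <vscale x 4 x i32> @example_gather_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %indices) {
  %vals = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %indices)
  %ext = zext <vscale x 4 x i16> %vals to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}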
llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -201,6 +201,10 @@ enum NodeType : unsigned {
   // Unsigned gather loads.
   GLD1,
   GLD1_SCALED,
+  GLD1_UXTW,
+  GLD1_SXTW,
+  GLD1_UXTW_SCALED,
+  GLD1_SXTW_SCALED,
 
   // NEON Load/Store with post-increment base updates
   LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -15,8 +15,12 @@ def SDT_AArch64_GLD1 : SDTypeProfile<1, 4, [
   SDTCVecEltisVT<1,i1>, SDTCisSameNumEltsAs<0,1>
 ]>;
 
-def AArch64ld1_gather        : SDNode<"AArch64ISD::GLD1",        SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
-def AArch64ld1_gather_scaled : SDNode<"AArch64ISD::GLD1_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1_gather             : SDNode<"AArch64ISD::GLD1",             SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1_gather_scaled      : SDNode<"AArch64ISD::GLD1_SCALED",      SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1_gather_uxtw        : SDNode<"AArch64ISD::GLD1_UXTW",        SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1_gather_sxtw        : SDNode<"AArch64ISD::GLD1_SXTW",        SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1_gather_uxtw_scaled : SDNode<"AArch64ISD::GLD1_UXTW_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1_gather_sxtw_scaled : SDNode<"AArch64ISD::GLD1_SXTW_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 
 let Predicates = [HasSVE] in {
@@ -410,25 +414,25 @@ let Predicates = [HasSVE] in {
 
   // Gathers using unscaled 32-bit offsets, e.g.
   // ld1h z0.s, p0/z, [x0, z0.s, uxtw]
-  defm GLD1SB_S   : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb",   ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
-  defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
-  defm GLD1B_S    : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b",    ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
-  defm GLDFF1B_S  : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b",  ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only>;
-  defm GLD1SH_S   : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh",   ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLDFF1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0101, "ldff1sh", ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLD1H_S    : sve_mem_32b_gld_vs_32_unscaled<0b0110, "ld1h",    ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLDFF1H_S  : sve_mem_32b_gld_vs_32_unscaled<0b0111, "ldff1h",  ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLD1W      : sve_mem_32b_gld_vs_32_unscaled<0b1010, "ld1w",    ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
-  defm GLDFF1W    : sve_mem_32b_gld_vs_32_unscaled<0b1011, "ldff1w",  ZPR32ExtSXTW8, ZPR32ExtUXTW8>;
+  defm GLD1SB_S   : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb",   null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
+  defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
+  defm GLD1B_S    : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
+  defm GLDFF1B_S  : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b",  null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
+  defm GLD1SH_S   : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
+  defm GLDFF1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
+  defm GLD1H_S    : sve_mem_32b_gld_vs_32_unscaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
+  defm GLDFF1H_S  : sve_mem_32b_gld_vs_32_unscaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
+  defm GLD1W      : sve_mem_32b_gld_vs_32_unscaled<0b1010, "ld1w",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i32>;
+  defm GLDFF1W    : sve_mem_32b_gld_vs_32_unscaled<0b1011, "ldff1w",  null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i32>;
 
   // Gathers using scaled 32-bit offsets, e.g.
   // ld1h z0.s, p0/z, [x0, z0.s, uxtw #1]
-  defm GLD1SH_S   : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh",   ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
-  defm GLDFF1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0101, "ldff1sh", ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
-  defm GLD1H_S    : sve_mem_32b_gld_sv_32_scaled<0b0110, "ld1h",    ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
-  defm GLDFF1H_S  : sve_mem_32b_gld_sv_32_scaled<0b0111, "ldff1h",  ZPR32ExtSXTW16, ZPR32ExtUXTW16>;
-  defm GLD1W      : sve_mem_32b_gld_sv_32_scaled<0b1010, "ld1w",    ZPR32ExtSXTW32, ZPR32ExtUXTW32>;
-  defm GLDFF1W    : sve_mem_32b_gld_sv_32_scaled<0b1011, "ldff1w",  ZPR32ExtSXTW32, ZPR32ExtUXTW32>;
+  defm GLD1SH_S   : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
+  defm GLDFF1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
+  defm GLD1H_S    : sve_mem_32b_gld_sv_32_scaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
+  defm GLDFF1H_S  : sve_mem_32b_gld_sv_32_scaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
+  defm GLD1W      : sve_mem_32b_gld_sv_32_scaled<0b1010, "ld1w",    AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR32ExtSXTW32, ZPR32ExtUXTW32, nxv4i32>;
+  defm GLDFF1W    : sve_mem_32b_gld_sv_32_scaled<0b1011, "ldff1w",  null_frag, null_frag, ZPR32ExtSXTW32, ZPR32ExtUXTW32, nxv4i32>;
 
   // Gathers using scaled 32-bit pointers with offset, e.g.
   // ld1h z0.s, p0/z, [z0.s, #16]
@@ -492,33 +496,33 @@ let Predicates = [HasSVE] in {
 
   // Gathers using unscaled 32-bit offsets unpacked in 64-bits elements, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, uxtw]
-  defm GLD1SB_D   : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb",   ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
-  defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
-  defm GLD1B_D    : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b",    ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
-  defm GLDFF1B_D  : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b",  ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only>;
-  defm GLD1SH_D   : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh",   ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0101, "ldff1sh", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLD1H_D    : sve_mem_64b_gld_vs_32_unscaled<0b0110, "ld1h",    ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1H_D  : sve_mem_64b_gld_vs_32_unscaled<0b0111, "ldff1h",  ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLD1SW_D   : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw",   ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1001, "ldff1sw", ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLD1W_D    : sve_mem_64b_gld_vs_32_unscaled<0b1010, "ld1w",    ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1W_D  : sve_mem_64b_gld_vs_32_unscaled<0b1011, "ldff1w",  ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLD1D      : sve_mem_64b_gld_vs_32_unscaled<0b1110, "ld1d",    ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
-  defm GLDFF1D    : sve_mem_64b_gld_vs_32_unscaled<0b1111, "ldff1d",  ZPR64ExtSXTW8, ZPR64ExtUXTW8>;
+  defm GLD1SB_D   : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb",   null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
+  defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
+  defm GLD1B_D    : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
+  defm GLDFF1B_D  : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b",  null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
+  defm GLD1SH_D   : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
+  defm GLDFF1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
+  defm GLD1H_D    : sve_mem_64b_gld_vs_32_unscaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
+  defm GLDFF1H_D  : sve_mem_64b_gld_vs_32_unscaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
+  defm GLD1SW_D   : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw",   null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
+  defm GLDFF1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
+  defm GLD1W_D    : sve_mem_64b_gld_vs_32_unscaled<0b1010, "ld1w",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
+  defm GLDFF1W_D  : sve_mem_64b_gld_vs_32_unscaled<0b1011, "ldff1w",  null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
+  defm GLD1D      : sve_mem_64b_gld_vs_32_unscaled<0b1110, "ld1d",    AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i64>;
+  defm GLDFF1D    : sve_mem_64b_gld_vs_32_unscaled<0b1111, "ldff1d",  null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i64>;
 
   // Gathers using scaled 32-bit offsets unpacked in 64-bits elements, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
-  defm GLD1SH_D   : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh",   ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
-  defm GLDFF1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0101, "ldff1sh", ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
-  defm GLD1H_D    : sve_mem_64b_gld_sv_32_scaled<0b0110, "ld1h",    ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
-  defm GLDFF1H_D  : sve_mem_64b_gld_sv_32_scaled<0b0111, "ldff1h",  ZPR64ExtSXTW16, ZPR64ExtUXTW16>;
-  defm GLD1SW_D   : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw",   ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
-  defm GLDFF1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1001, "ldff1sw", ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
-  defm GLD1W_D    : sve_mem_64b_gld_sv_32_scaled<0b1010, "ld1w",    ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
-  defm GLDFF1W_D  : sve_mem_64b_gld_sv_32_scaled<0b1011, "ldff1w",  ZPR64ExtSXTW32, ZPR64ExtUXTW32>;
-  defm GLD1D      : sve_mem_64b_gld_sv_32_scaled<0b1110, "ld1d",    ZPR64ExtSXTW64, ZPR64ExtUXTW64>;
-  defm GLDFF1D    : sve_mem_64b_gld_sv_32_scaled<0b1111, "ldff1d",  ZPR64ExtSXTW64, ZPR64ExtUXTW64>;
+  defm GLD1SH_D   : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh",   null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
+  defm GLDFF1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
+  defm GLD1H_D    : sve_mem_64b_gld_sv_32_scaled<0b0110, "ld1h",    AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
+  defm GLDFF1H_D  : sve_mem_64b_gld_sv_32_scaled<0b0111, "ldff1h",  null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
+  defm GLD1SW_D   : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw",   null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
+  defm GLDFF1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
+  defm GLD1W_D    : sve_mem_64b_gld_sv_32_scaled<0b1010, "ld1w",    AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
+  defm GLDFF1W_D  : sve_mem_64b_gld_sv_32_scaled<0b1011, "ldff1w",  null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
+  defm GLD1D      : sve_mem_64b_gld_sv_32_scaled<0b1110, "ld1d",    AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>;
+  defm GLDFF1D    : sve_mem_64b_gld_sv_32_scaled<0b1111, "ldff1d",  null_frag, null_frag, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>;
 
   // Non-temporal contiguous loads (register + immediate)
   defm LDNT1B_ZRI : sve_mem_cldnt_si<0b00, "ldnt1b", Z_b, ZPR8>;
llvm/lib/Target/AArch64/SVEInstrFormats.td
@@ -5293,8 +5293,11 @@ class sve_mem_32b_gld_sv<bits<4> opc, bit xs, bit scaled, string asm,
 }
 
 multiclass sve_mem_32b_gld_sv_32_scaled<bits<4> opc, string asm,
+                                        SDPatternOperator sxtw_op,
+                                        SDPatternOperator uxtw_op,
                                         RegisterOperand sxtw_opnd,
-                                        RegisterOperand uxtw_opnd> {
+                                        RegisterOperand uxtw_opnd,
+                                        ValueType vt> {
   def _UXTW_SCALED_REAL : sve_mem_32b_gld_sv<opc, 0, 1, asm, uxtw_opnd>;
   def _SXTW_SCALED_REAL : sve_mem_32b_gld_sv<opc, 1, 1, asm, sxtw_opnd>;
 
@@ -5302,11 +5305,19 @@ multiclass sve_mem_32b_gld_sv_32_scaled<bits<4> opc, string asm,
                   (!cast<Instruction>(NAME # _UXTW_SCALED_REAL) ZPR32:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, uxtw_opnd:$Zm), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $Zm]",
                   (!cast<Instruction>(NAME # _SXTW_SCALED_REAL) ZPR32:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, sxtw_opnd:$Zm), 0>;
+
+  def : Pat<(nxv4i32 (uxtw_op (nxv4i1 PPR:$gp), GPR64sp:$base, (nxv4i32 ZPR:$indices), vt)),
+            (!cast<Instruction>(NAME # _UXTW_SCALED_REAL) PPR:$gp, GPR64sp:$base, ZPR:$indices)>;
+  def : Pat<(nxv4i32 (sxtw_op (nxv4i1 PPR:$gp), GPR64sp:$base, (nxv4i32 ZPR:$indices), vt)),
+            (!cast<Instruction>(NAME # _SXTW_SCALED_REAL) PPR:$gp, GPR64sp:$base, ZPR:$indices)>;
 }
 
 multiclass sve_mem_32b_gld_vs_32_unscaled<bits<4> opc, string asm,
+                                          SDPatternOperator sxtw_op,
+                                          SDPatternOperator uxtw_op,
                                           RegisterOperand sxtw_opnd,
-                                          RegisterOperand uxtw_opnd> {
+                                          RegisterOperand uxtw_opnd,
+                                          ValueType vt> {
   def _UXTW_REAL : sve_mem_32b_gld_sv<opc, 0, 0, asm, uxtw_opnd>;
   def _SXTW_REAL : sve_mem_32b_gld_sv<opc, 1, 0, asm, sxtw_opnd>;
 
@@ -5314,6 +5325,11 @@ multiclass sve_mem_32b_gld_vs_32_unscaled<bits<4> opc, string asm,
                   (!cast<Instruction>(NAME # _UXTW_REAL) ZPR32:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, uxtw_opnd:$Zm), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $Zm]",
                   (!cast<Instruction>(NAME # _SXTW_REAL) ZPR32:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, sxtw_opnd:$Zm), 0>;
+
+  def : Pat<(nxv4i32 (uxtw_op (nxv4i1 PPR:$gp), GPR64sp:$base, (nxv4i32 ZPR:$offsets), vt)),
+            (!cast<Instruction>(NAME # _UXTW_REAL) PPR:$gp, GPR64sp:$base, ZPR:$offsets)>;
+  def : Pat<(nxv4i32 (sxtw_op (nxv4i1 PPR:$gp), GPR64sp:$base, (nxv4i32 ZPR:$offsets), vt)),
+            (!cast<Instruction>(NAME # _SXTW_REAL) PPR:$gp, GPR64sp:$base, ZPR:$offsets)>;
 }
 
 
@@ -5585,8 +5601,11 @@ class sve_mem_64b_gld_sv<bits<4> opc, bit xs, bit scaled, bit lsl, string asm,
 }
 
 multiclass sve_mem_64b_gld_sv_32_scaled<bits<4> opc, string asm,
+                                        SDPatternOperator sxtw_op,
+                                        SDPatternOperator uxtw_op,
                                         RegisterOperand sxtw_opnd,
-                                        RegisterOperand uxtw_opnd> {
+                                        RegisterOperand uxtw_opnd,
+                                        ValueType vt> {
   def _UXTW_SCALED_REAL : sve_mem_64b_gld_sv<opc, 0, 1, 0, asm, uxtw_opnd>;
   def _SXTW_SCALED_REAL : sve_mem_64b_gld_sv<opc, 1, 1, 0, asm, sxtw_opnd>;
 
@@ -5594,11 +5613,19 @@ multiclass sve_mem_64b_gld_sv_32_scaled<bits<4> opc, string asm,
                   (!cast<Instruction>(NAME # _UXTW_SCALED_REAL) ZPR64:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, uxtw_opnd:$Zm), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $Zm]",
                   (!cast<Instruction>(NAME # _SXTW_SCALED_REAL) ZPR64:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, sxtw_opnd:$Zm), 0>;
+
+  def : Pat<(nxv2i64 (uxtw_op (nxv2i1 PPR:$gp), GPR64sp:$base, (nxv2i64 ZPR:$indices), vt)),
+            (!cast<Instruction>(NAME # _UXTW_SCALED_REAL) PPR:$gp, GPR64sp:$base, ZPR:$indices)>;
+  def : Pat<(nxv2i64 (sxtw_op (nxv2i1 PPR:$gp), GPR64sp:$base, (nxv2i64 ZPR:$indices), vt)),
+            (!cast<Instruction>(NAME # _SXTW_SCALED_REAL) PPR:$gp, GPR64sp:$base, ZPR:$indices)>;
 }
 
 multiclass sve_mem_64b_gld_vs_32_unscaled<bits<4> opc, string asm,
+                                          SDPatternOperator sxtw_op,
+                                          SDPatternOperator uxtw_op,
                                           RegisterOperand sxtw_opnd,
-                                          RegisterOperand uxtw_opnd> {
+                                          RegisterOperand uxtw_opnd,
+                                          ValueType vt> {
   def _UXTW_REAL : sve_mem_64b_gld_sv<opc, 0, 0, 0, asm, uxtw_opnd>;
   def _SXTW_REAL : sve_mem_64b_gld_sv<opc, 1, 0, 0, asm, sxtw_opnd>;
 
@@ -5606,6 +5633,11 @@ multiclass sve_mem_64b_gld_vs_32_unscaled<bits<4> opc, string asm,
                   (!cast<Instruction>(NAME # _UXTW_REAL) ZPR64:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, uxtw_opnd:$Zm), 0>;
   def : InstAlias<asm # "\t$Zt, $Pg/z, [$Rn, $Zm]",
                   (!cast<Instruction>(NAME # _SXTW_REAL) ZPR64:$Zt, PPR3bAny:$Pg, GPR64sp:$Rn, sxtw_opnd:$Zm), 0>;
+
+  def : Pat<(nxv2i64 (uxtw_op (nxv2i1 PPR:$gp), GPR64sp:$base, (nxv2i64 ZPR:$offsets), vt)),
+            (!cast<Instruction>(NAME # _UXTW_REAL) PPR:$gp, GPR64sp:$base, ZPR:$offsets)>;
+  def : Pat<(nxv2i64 (sxtw_op (nxv2i1 PPR:$gp), GPR64sp:$base, (nxv2i64 ZPR:$offsets), vt)),
+            (!cast<Instruction>(NAME # _SXTW_REAL) PPR:$gp, GPR64sp:$base, ZPR:$offsets)>;
 }
 
 multiclass sve_mem_64b_gld_sv2_64_scaled<bits<4> opc, string asm,
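One design note on the new multiclass parameters: the SDPatternOperator pair selects which gather node each instruction matches, and the trailing ValueType is the memory element type carried by the node, which is what lets otherwise identical register patterns pick different opcodes. As a sketch (hypothetical function names; declarations as in the tests below), these two pieces of IR differ only in the loaded element type, yet should select ld1h and ld1w respectively:

define <vscale x 4 x i32> @pick_ld1h(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %off) {
  ; loads i16 elements -> ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
  %v = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %off)
  %e = zext <vscale x 4 x i16> %v to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %e
}

define <vscale x 4 x i32> @pick_ld1w(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %off) {
  ; loads i32 elements -> ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
  %v = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %off)
  ret <vscale x 4 x i32> %v
}

declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)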
llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-scaled-offsets.ll (new file)
@@ -0,0 +1,198 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1H, LD1W, LD1D: base + 32-bit scaled offset, sign (sxtw) or zero (uxtw)
; extended to 64 bits
; e.g. ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
;

; LD1H
define <vscale x 4 x i32> @gld1h_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_uxtw_index:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                          i16* %base,
                                                                                          <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1h_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_sxtw_index:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                          i16* %base,
                                                                                          <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_uxtw_index:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                          i16* %base,
                                                                                          <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_sxtw_index:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                          i16* %base,
                                                                                          <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_uxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_index:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                          i32* %base,
                                                                                          <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @gld1w_s_sxtw_index(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_index:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                          i32* %base,
                                                                                          <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_uxtw_index:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                          i32* %base,
                                                                                          <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_sxtw_index:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                          i32* %base,
                                                                                          <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_uxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_index_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                            float* %base,
                                                                                            <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 4 x float> @gld1w_s_sxtw_index_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_index_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                            float* %base,
                                                                                            <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_s_uxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_s_uxtw_index:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                          i64* %base,
                                                                                          <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x i64> @gld1d_sxtw_index(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_sxtw_index:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                          i64* %base,
                                                                                          <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_uxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_uxtw_index_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                             double* %base,
                                                                                             <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

define <vscale x 2 x double> @gld1d_sxtw_index_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_sxtw_index_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                             double* %base,
                                                                                             <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

; LD1H
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)

declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)

; LD1W
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)

declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
llvm/test/CodeGen/AArch64/sve-intrinsics-gather-loads-32bit-unscaled-offsets.ll (new file)
@@ -0,0 +1,259 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B, LD1W, LD1H, LD1D: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
; e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
;

; LD1B
define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_uxtw:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                  i8* %base,
                                                                                  <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_sxtw:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                  i8* %base,
                                                                                  <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d_uxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                  i8* %base,
                                                                                  <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1b_d_sxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: mov w8, #255
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                  i8* %base,
                                                                                  <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_uxtw:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                    i16* %base,
                                                                                    <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_sxtw:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.s, w8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                    i16* %base,
                                                                                    <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_uxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    i16* %base,
                                                                                    <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1h_d_sxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: mov w8, #65535
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    i16* %base,
                                                                                    <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1W
define <vscale x 4 x i32> @gld1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                    i32* %base,
                                                                                    <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                    i32* %base,
                                                                                    <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_uxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    i32* %base,
                                                                                    <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1w_d_sxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: mov w8, #-1
; CHECK-NEXT: mov z1.d, x8
; CHECK-NEXT: and z0.d, z0.d, z1.d
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    i32* %base,
                                                                                    <vscale x 2 x i64> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                      float* %base,
                                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 4 x float> @gld1w_s_sxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                      float* %base,
                                                                                      <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_uxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    i64* %base,
                                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_sxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    i64* %base,
                                                                                    <vscale x 2 x i64> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_uxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_uxtw_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                       double* %base,
                                                                                       <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i64> %b) {
; CHECK-LABEL: gld1d_d_sxtw_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                       double* %base,
                                                                                       <vscale x 2 x i64> %b)
  ret <vscale x 2 x double> %load
}

; LD1B
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)

; LD1H
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)

; LD1W
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32.nxv4i32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64.nxv2i64(<vscale x 2 x i1>, double*, <vscale x 2 x i64>)