[AArch64][SVE] Add DAG combine rules for gather loads and sext/zext
Summary: These changes allow us to support sign-extending gather loads with the existing intrinsics (i.e. @llvm.aarch64.sve.ld1.gather.*).

Reviewers: sdesmalen, huntergr, kmclaughlin, efriedma, rengolin, rovka, dancgr, mgudim

Reviewed By: sdesmalen

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D70812

parent cb30ad728f
commit 65651f197a
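The pattern being combined is a gather-load intrinsic whose narrow result is then extended. Below is a minimal LLVM IR sketch of the sign-extending case (an editor's illustration, not part of the commit: the function name is made up, the intrinsic is one exercised by the tests added in this patch). With this change, legalization turns the sext into a SIGN_EXTEND_INREG of the GLD1 gather node and the new combine rewrites that pair into a signed gather node (GLD1S), so a single ld1sh is selected instead of ld1h followed by a separate sign-extend.

; Sketch only, not part of the commit.
define <vscale x 4 x i32> @example_sext_gather(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets)
  %ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>   ; folded into the gather -> ld1sh
  ret <vscale x 4 x i32> %ext
}
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)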
@@ -614,6 +614,7 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setTargetDAGCombine(ISD::ANY_EXTEND);
   setTargetDAGCombine(ISD::ZERO_EXTEND);
   setTargetDAGCombine(ISD::SIGN_EXTEND);
+  setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
   setTargetDAGCombine(ISD::BITCAST);
   setTargetDAGCombine(ISD::CONCAT_VECTORS);
   setTargetDAGCombine(ISD::STORE);
@@ -1350,6 +1351,13 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
   case AArch64ISD::GLD1_SXTW_SCALED: return "AArch64ISD::GLD1_SXTW_SCALED";
   case AArch64ISD::GLD1_UXTW_SCALED: return "AArch64ISD::GLD1_UXTW_SCALED";
   case AArch64ISD::GLD1_IMM: return "AArch64ISD::GLD1_IMM";
+  case AArch64ISD::GLD1S: return "AArch64ISD::GLD1S";
+  case AArch64ISD::GLD1S_SCALED: return "AArch64ISD::GLD1S_SCALED";
+  case AArch64ISD::GLD1S_SXTW: return "AArch64ISD::GLD1S_SXTW";
+  case AArch64ISD::GLD1S_UXTW: return "AArch64ISD::GLD1S_UXTW";
+  case AArch64ISD::GLD1S_SXTW_SCALED: return "AArch64ISD::GLD1S_SXTW_SCALED";
+  case AArch64ISD::GLD1S_UXTW_SCALED: return "AArch64ISD::GLD1S_UXTW_SCALED";
+  case AArch64ISD::GLD1S_IMM: return "AArch64ISD::GLD1S_IMM";
   }
   return nullptr;
 }
@@ -9917,6 +9925,67 @@ static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
   return SDValue();
 }
 
+static bool isConstantSplatVectorMaskForType(SDNode *N, EVT MemVT) {
+  if (!MemVT.getVectorElementType().isSimple())
+    return false;
+
+  uint64_t MaskForTy = 0ull;
+  switch (MemVT.getVectorElementType().getSimpleVT().SimpleTy) {
+  case MVT::i8:
+    MaskForTy = 0xffull;
+    break;
+  case MVT::i16:
+    MaskForTy = 0xffffull;
+    break;
+  case MVT::i32:
+    MaskForTy = 0xffffffffull;
+    break;
+  default:
+    return false;
+    break;
+  }
+
+  if (N->getOpcode() == AArch64ISD::DUP || N->getOpcode() == ISD::SPLAT_VECTOR)
+    if (auto *Op0 = dyn_cast<ConstantSDNode>(N->getOperand(0)))
+      return Op0->getAPIntValue().getLimitedValue() == MaskForTy;
+
+  return false;
+}
+
+static SDValue performSVEAndCombine(SDNode *N,
+                                    TargetLowering::DAGCombinerInfo &DCI) {
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  SDValue Src = N->getOperand(0);
+  SDValue Mask = N->getOperand(1);
+
+  if (!Src.hasOneUse())
+    return SDValue();
+
+  // GLD1* instructions perform an implicit zero-extend, which makes them
+  // perfect candidates for combining.
+  switch (Src->getOpcode()) {
+  case AArch64ISD::GLD1:
+  case AArch64ISD::GLD1_SCALED:
+  case AArch64ISD::GLD1_SXTW:
+  case AArch64ISD::GLD1_SXTW_SCALED:
+  case AArch64ISD::GLD1_UXTW:
+  case AArch64ISD::GLD1_UXTW_SCALED:
+  case AArch64ISD::GLD1_IMM:
+    break;
+  default:
+    return SDValue();
+  }
+
+  EVT MemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
+
+  if (isConstantSplatVectorMaskForType(Mask.getNode(), MemVT))
+    return Src;
+
+  return SDValue();
+}
+
 static SDValue performANDCombine(SDNode *N,
                                  TargetLowering::DAGCombinerInfo &DCI) {
   SelectionDAG &DAG = DCI.DAG;
@@ -9925,6 +9994,9 @@ static SDValue performANDCombine(SDNode *N,
   if (!VT.isVector() || !DAG.getTargetLoweringInfo().isTypeLegal(VT))
     return SDValue();
 
+  if (VT.isScalableVector())
+    return performSVEAndCombine(N, DCI);
+
   BuildVectorSDNode *BVN =
       dyn_cast<BuildVectorSDNode>(N->getOperand(1).getNode());
   if (!BVN)
@@ -12063,6 +12135,64 @@ static SDValue performLD1GatherCombine(SDNode *N, SelectionDAG &DAG,
   return DAG.getMergeValues({Load, LoadChain}, DL);
 }
 
+
+static SDValue
+performSignExtendInRegCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                              SelectionDAG &DAG) {
+  if (DCI.isBeforeLegalizeOps())
+    return SDValue();
+
+  SDValue Src = N->getOperand(0);
+  unsigned Opc = Src->getOpcode();
+
+  // Gather load nodes (e.g. AArch64ISD::GLD1) are straightforward candidates
+  // for DAG Combine with SIGN_EXTEND_INREG. Bail out for all other nodes.
+  unsigned NewOpc;
+  switch (Opc) {
+  case AArch64ISD::GLD1:
+    NewOpc = AArch64ISD::GLD1S;
+    break;
+  case AArch64ISD::GLD1_SCALED:
+    NewOpc = AArch64ISD::GLD1S_SCALED;
+    break;
+  case AArch64ISD::GLD1_SXTW:
+    NewOpc = AArch64ISD::GLD1S_SXTW;
+    break;
+  case AArch64ISD::GLD1_SXTW_SCALED:
+    NewOpc = AArch64ISD::GLD1S_SXTW_SCALED;
+    break;
+  case AArch64ISD::GLD1_UXTW:
+    NewOpc = AArch64ISD::GLD1S_UXTW;
+    break;
+  case AArch64ISD::GLD1_UXTW_SCALED:
+    NewOpc = AArch64ISD::GLD1S_UXTW_SCALED;
+    break;
+  case AArch64ISD::GLD1_IMM:
+    NewOpc = AArch64ISD::GLD1S_IMM;
+    break;
+  default:
+    return SDValue();
+  }
+
+  EVT SignExtSrcVT = cast<VTSDNode>(N->getOperand(1))->getVT();
+  EVT GLD1SrcMemVT = cast<VTSDNode>(Src->getOperand(4))->getVT();
+
+  if ((SignExtSrcVT != GLD1SrcMemVT) || !Src.hasOneUse())
+    return SDValue();
+
+  EVT DstVT = N->getValueType(0);
+  SDVTList VTs = DAG.getVTList(DstVT, MVT::Other);
+  SDValue Ops[] = {Src->getOperand(0), Src->getOperand(1), Src->getOperand(2),
+                   Src->getOperand(3), Src->getOperand(4)};
+
+  SDValue ExtLoad = DAG.getNode(NewOpc, SDLoc(N), VTs, Ops);
+  DCI.CombineTo(N, ExtLoad);
+  DCI.CombineTo(Src.getNode(), ExtLoad, ExtLoad.getValue(1));
+
+  // Return N so it doesn't get rechecked
+  return SDValue(N, 0);
+}
+
 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
                                                  DAGCombinerInfo &DCI) const {
   SelectionDAG &DAG = DCI.DAG;
@@ -12097,6 +12227,8 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case ISD::ZERO_EXTEND:
   case ISD::SIGN_EXTEND:
     return performExtendCombine(N, DCI, DAG);
+  case ISD::SIGN_EXTEND_INREG:
+    return performSignExtendInRegCombine(N, DCI, DAG);
   case ISD::BITCAST:
     return performBitcastCombine(N, DCI, DAG);
   case ISD::CONCAT_VECTORS:
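The zero-extending case needs no new ISD nodes: GLD1* already zero-extends from the memory type, and the zext is legalized into an AND with a constant splat mask, which performSVEAndCombine above folds away. A sketch of that case follows (again an editor's illustration, not part of the commit: the function name is invented, the intrinsic comes from the existing tests updated below).

; Sketch only, not part of the commit.
define <vscale x 4 x i32> @example_zext_gather(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets) {
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %offsets)
  %ext = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>   ; the masking AND is folded into ld1b's implicit zero-extend
  ret <vscale x 4 x i32> %ext
}
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)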
@@ -215,6 +215,15 @@ enum NodeType : unsigned {
   GLD1_SXTW_SCALED,
   GLD1_IMM,
 
+  // Signed gather loads
+  GLD1S,
+  GLD1S_SCALED,
+  GLD1S_UXTW,
+  GLD1S_SXTW,
+  GLD1S_UXTW_SCALED,
+  GLD1S_SXTW_SCALED,
+  GLD1S_IMM,
+
   // NEON Load/Store with post-increment base updates
   LD2post = ISD::FIRST_TARGET_MEMORY_OPCODE,
   LD3post,
@@ -28,6 +28,14 @@ def AArch64ld1_gather_uxtw_scaled : SDNode<"AArch64ISD::GLD1_UXTW_SCALED",
 def AArch64ld1_gather_sxtw_scaled : SDNode<"AArch64ISD::GLD1_SXTW_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 def AArch64ld1_gather_imm : SDNode<"AArch64ISD::GLD1_IMM", SDT_AArch64_GLD1_IMM, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
 
+def AArch64ld1s_gather : SDNode<"AArch64ISD::GLD1S", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_scaled : SDNode<"AArch64ISD::GLD1S_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_uxtw : SDNode<"AArch64ISD::GLD1S_UXTW", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_sxtw : SDNode<"AArch64ISD::GLD1S_SXTW", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_uxtw_scaled : SDNode<"AArch64ISD::GLD1S_UXTW_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_sxtw_scaled : SDNode<"AArch64ISD::GLD1S_SXTW_SCALED", SDT_AArch64_GLD1, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+def AArch64ld1s_gather_imm : SDNode<"AArch64ISD::GLD1S_IMM", SDT_AArch64_GLD1_IMM, [SDNPHasChain, SDNPMayLoad, SDNPOptInGlue]>;
+
 def SDT_AArch64Reduce : SDTypeProfile<1, 2, [SDTCisVec<1>, SDTCisVec<2>]>;
 
 def AArch64smaxv_pred : SDNode<"AArch64ISD::SMAXV_PRED", SDT_AArch64Reduce>;
@@ -430,11 +438,11 @@ let Predicates = [HasSVE] in {
 
   // Gathers using unscaled 32-bit offsets, e.g.
   // ld1h z0.s, p0/z, [x0, z0.s, uxtw]
-  defm GLD1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb", null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
+  defm GLD1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0000, "ld1sb", AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
   defm GLDFF1SB_S : sve_mem_32b_gld_vs_32_unscaled<0b0001, "ldff1sb", null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
   defm GLD1B_S : sve_mem_32b_gld_vs_32_unscaled<0b0010, "ld1b", AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
   defm GLDFF1B_S : sve_mem_32b_gld_vs_32_unscaled<0b0011, "ldff1b", null_frag, null_frag, ZPR32ExtSXTW8Only, ZPR32ExtUXTW8Only, nxv4i8>;
-  defm GLD1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh", null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
+  defm GLD1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0100, "ld1sh", AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
   defm GLDFF1SH_S : sve_mem_32b_gld_vs_32_unscaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
   defm GLD1H_S : sve_mem_32b_gld_vs_32_unscaled<0b0110, "ld1h", AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
   defm GLDFF1H_S : sve_mem_32b_gld_vs_32_unscaled<0b0111, "ldff1h", null_frag, null_frag, ZPR32ExtSXTW8, ZPR32ExtUXTW8, nxv4i16>;
@@ -443,7 +451,7 @@ let Predicates = [HasSVE] in {
 
   // Gathers using scaled 32-bit offsets, e.g.
   // ld1h z0.s, p0/z, [x0, z0.s, uxtw #1]
-  defm GLD1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh", null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
+  defm GLD1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0100, "ld1sh", AArch64ld1s_gather_sxtw_scaled, AArch64ld1s_gather_uxtw_scaled, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
   defm GLDFF1SH_S : sve_mem_32b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
   defm GLD1H_S : sve_mem_32b_gld_sv_32_scaled<0b0110, "ld1h", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
   defm GLDFF1H_S : sve_mem_32b_gld_sv_32_scaled<0b0111, "ldff1h", null_frag, null_frag, ZPR32ExtSXTW16, ZPR32ExtUXTW16, nxv4i16>;
@@ -452,11 +460,11 @@ let Predicates = [HasSVE] in {
 
   // Gathers using 32-bit pointers with scaled offset, e.g.
   // ld1h z0.s, p0/z, [z0.s, #16]
-  defm GLD1SB_S : sve_mem_32b_gld_vi_32_ptrs<0b0000, "ld1sb", imm0_31, null_frag, nxv4i8>;
+  defm GLD1SB_S : sve_mem_32b_gld_vi_32_ptrs<0b0000, "ld1sb", imm0_31, AArch64ld1s_gather_imm, nxv4i8>;
   defm GLDFF1SB_S : sve_mem_32b_gld_vi_32_ptrs<0b0001, "ldff1sb", imm0_31, null_frag, nxv4i8>;
   defm GLD1B_S : sve_mem_32b_gld_vi_32_ptrs<0b0010, "ld1b", imm0_31, AArch64ld1_gather_imm, nxv4i8>;
   defm GLDFF1B_S : sve_mem_32b_gld_vi_32_ptrs<0b0011, "ldff1b", imm0_31, null_frag, nxv4i8>;
-  defm GLD1SH_S : sve_mem_32b_gld_vi_32_ptrs<0b0100, "ld1sh", uimm5s2, null_frag, nxv4i16>;
+  defm GLD1SH_S : sve_mem_32b_gld_vi_32_ptrs<0b0100, "ld1sh", uimm5s2, AArch64ld1s_gather_imm, nxv4i16>;
   defm GLDFF1SH_S : sve_mem_32b_gld_vi_32_ptrs<0b0101, "ldff1sh", uimm5s2, null_frag, nxv4i16>;
   defm GLD1H_S : sve_mem_32b_gld_vi_32_ptrs<0b0110, "ld1h", uimm5s2, AArch64ld1_gather_imm, nxv4i16>;
   defm GLDFF1H_S : sve_mem_32b_gld_vi_32_ptrs<0b0111, "ldff1h", uimm5s2, null_frag, nxv4i16>;
@@ -465,45 +473,45 @@ let Predicates = [HasSVE] in {
 
   // Gathers using 64-bit pointers with scaled offset, e.g.
   // ld1h z0.d, p0/z, [z0.d, #16]
-  defm GLD1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0000, "ld1sb", imm0_31, null_frag, nxv2i8>;
+  defm GLD1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0000, "ld1sb", imm0_31, AArch64ld1s_gather_imm, nxv2i8>;
   defm GLDFF1SB_D : sve_mem_64b_gld_vi_64_ptrs<0b0001, "ldff1sb", imm0_31, null_frag, nxv2i8>;
   defm GLD1B_D : sve_mem_64b_gld_vi_64_ptrs<0b0010, "ld1b", imm0_31, AArch64ld1_gather_imm, nxv2i8>;
   defm GLDFF1B_D : sve_mem_64b_gld_vi_64_ptrs<0b0011, "ldff1b", imm0_31, null_frag, nxv2i8>;
-  defm GLD1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0100, "ld1sh", uimm5s2, null_frag, nxv2i16>;
+  defm GLD1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0100, "ld1sh", uimm5s2, AArch64ld1s_gather_imm, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_vi_64_ptrs<0b0101, "ldff1sh", uimm5s2, null_frag, nxv2i16>;
   defm GLD1H_D : sve_mem_64b_gld_vi_64_ptrs<0b0110, "ld1h", uimm5s2, AArch64ld1_gather_imm, nxv2i16>;
   defm GLDFF1H_D : sve_mem_64b_gld_vi_64_ptrs<0b0111, "ldff1h", uimm5s2, null_frag, nxv2i16>;
-  defm GLD1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1000, "ld1sw", uimm5s4, null_frag, nxv2i32>;
+  defm GLD1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1000, "ld1sw", uimm5s4, AArch64ld1s_gather_imm, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_vi_64_ptrs<0b1001, "ldff1sw", uimm5s4, null_frag, nxv2i32>;
   defm GLD1W_D : sve_mem_64b_gld_vi_64_ptrs<0b1010, "ld1w", uimm5s4, AArch64ld1_gather_imm, nxv2i32>;
   defm GLDFF1W_D : sve_mem_64b_gld_vi_64_ptrs<0b1011, "ldff1w", uimm5s4, null_frag, nxv2i32>;
   defm GLD1D : sve_mem_64b_gld_vi_64_ptrs<0b1110, "ld1d", uimm5s8, AArch64ld1_gather_imm, nxv2i64>;
   defm GLDFF1D : sve_mem_64b_gld_vi_64_ptrs<0b1111, "ldff1d", uimm5s8, null_frag, nxv2i64>;
 
   // Gathers using unscaled 64-bit offsets, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d]
-  defm GLD1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0000, "ld1sb", null_frag, nxv2i8>;
+  defm GLD1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0000, "ld1sb", AArch64ld1s_gather, nxv2i8>;
   defm GLDFF1SB_D : sve_mem_64b_gld_vs2_64_unscaled<0b0001, "ldff1sb", null_frag, nxv2i8>;
   defm GLD1B_D : sve_mem_64b_gld_vs2_64_unscaled<0b0010, "ld1b", AArch64ld1_gather, nxv2i8>;
   defm GLDFF1B_D : sve_mem_64b_gld_vs2_64_unscaled<0b0011, "ldff1b", null_frag, nxv2i8>;
-  defm GLD1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0100, "ld1sh", null_frag, nxv2i16>;
+  defm GLD1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0100, "ld1sh", AArch64ld1s_gather, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_vs2_64_unscaled<0b0101, "ldff1sh", null_frag, nxv2i16>;
   defm GLD1H_D : sve_mem_64b_gld_vs2_64_unscaled<0b0110, "ld1h", AArch64ld1_gather, nxv2i16>;
   defm GLDFF1H_D : sve_mem_64b_gld_vs2_64_unscaled<0b0111, "ldff1h", null_frag, nxv2i16>;
-  defm GLD1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1000, "ld1sw", null_frag, nxv2i32>;
+  defm GLD1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1000, "ld1sw", AArch64ld1s_gather, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_vs2_64_unscaled<0b1001, "ldff1sw", null_frag, nxv2i32>;
   defm GLD1W_D : sve_mem_64b_gld_vs2_64_unscaled<0b1010, "ld1w", AArch64ld1_gather, nxv2i32>;
   defm GLDFF1W_D : sve_mem_64b_gld_vs2_64_unscaled<0b1011, "ldff1w", null_frag, nxv2i32>;
   defm GLD1D : sve_mem_64b_gld_vs2_64_unscaled<0b1110, "ld1d", AArch64ld1_gather, nxv2i64>;
   defm GLDFF1D : sve_mem_64b_gld_vs2_64_unscaled<0b1111, "ldff1d", null_frag, nxv2i64>;
 
   // Gathers using scaled 64-bit offsets, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, lsl #1]
-  defm GLD1SH_D : sve_mem_64b_gld_sv2_64_scaled<0b0100, "ld1sh", null_frag, ZPR64ExtLSL16, nxv2i16>;
+  defm GLD1SH_D : sve_mem_64b_gld_sv2_64_scaled<0b0100, "ld1sh", AArch64ld1s_gather_scaled, ZPR64ExtLSL16, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_sv2_64_scaled<0b0101, "ldff1sh", null_frag, ZPR64ExtLSL16, nxv2i16>;
   defm GLD1H_D : sve_mem_64b_gld_sv2_64_scaled<0b0110, "ld1h", AArch64ld1_gather_scaled, ZPR64ExtLSL16, nxv2i16>;
   defm GLDFF1H_D : sve_mem_64b_gld_sv2_64_scaled<0b0111, "ldff1h", null_frag, ZPR64ExtLSL16, nxv2i16>;
-  defm GLD1SW_D : sve_mem_64b_gld_sv2_64_scaled<0b1000, "ld1sw", null_frag, ZPR64ExtLSL32, nxv2i32>;
+  defm GLD1SW_D : sve_mem_64b_gld_sv2_64_scaled<0b1000, "ld1sw", AArch64ld1s_gather_scaled, ZPR64ExtLSL32, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_sv2_64_scaled<0b1001, "ldff1sw", null_frag, ZPR64ExtLSL32, nxv2i32>;
   defm GLD1W_D : sve_mem_64b_gld_sv2_64_scaled<0b1010, "ld1w", AArch64ld1_gather_scaled, ZPR64ExtLSL32, nxv2i32>;
   defm GLDFF1W_D : sve_mem_64b_gld_sv2_64_scaled<0b1011, "ldff1w", null_frag, ZPR64ExtLSL32, nxv2i32>;
@@ -512,15 +520,15 @@ let Predicates = [HasSVE] in {
 
   // Gathers using unscaled 32-bit offsets unpacked in 64-bits elements, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, uxtw]
-  defm GLD1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb", null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
+  defm GLD1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0000, "ld1sb", AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm GLDFF1SB_D : sve_mem_64b_gld_vs_32_unscaled<0b0001, "ldff1sb", null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm GLD1B_D : sve_mem_64b_gld_vs_32_unscaled<0b0010, "ld1b", AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
   defm GLDFF1B_D : sve_mem_64b_gld_vs_32_unscaled<0b0011, "ldff1b", null_frag, null_frag, ZPR64ExtSXTW8Only, ZPR64ExtUXTW8Only, nxv2i8>;
-  defm GLD1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
+  defm GLD1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0100, "ld1sh", AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_vs_32_unscaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
   defm GLD1H_D : sve_mem_64b_gld_vs_32_unscaled<0b0110, "ld1h", AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
   defm GLDFF1H_D : sve_mem_64b_gld_vs_32_unscaled<0b0111, "ldff1h", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i16>;
-  defm GLD1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
+  defm GLD1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1000, "ld1sw", AArch64ld1s_gather_sxtw, AArch64ld1s_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_vs_32_unscaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm GLD1W_D : sve_mem_64b_gld_vs_32_unscaled<0b1010, "ld1w", AArch64ld1_gather_sxtw, AArch64ld1_gather_uxtw, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
   defm GLDFF1W_D : sve_mem_64b_gld_vs_32_unscaled<0b1011, "ldff1w", null_frag, null_frag, ZPR64ExtSXTW8, ZPR64ExtUXTW8, nxv2i32>;
@@ -529,16 +537,16 @@ let Predicates = [HasSVE] in {
 
   // Gathers using scaled 32-bit offsets unpacked in 64-bits elements, e.g.
   // ld1h z0.d, p0/z, [x0, z0.d, uxtw #1]
-  defm GLD1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
+  defm GLD1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0100, "ld1sh", AArch64ld1s_gather_sxtw_scaled, AArch64ld1s_gather_uxtw_scaled, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
   defm GLDFF1SH_D : sve_mem_64b_gld_sv_32_scaled<0b0101, "ldff1sh", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
   defm GLD1H_D : sve_mem_64b_gld_sv_32_scaled<0b0110, "ld1h", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
   defm GLDFF1H_D : sve_mem_64b_gld_sv_32_scaled<0b0111, "ldff1h", null_frag, null_frag, ZPR64ExtSXTW16, ZPR64ExtUXTW16, nxv2i16>;
-  defm GLD1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
+  defm GLD1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1000, "ld1sw", AArch64ld1s_gather_sxtw_scaled, AArch64ld1s_gather_uxtw_scaled, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
   defm GLDFF1SW_D : sve_mem_64b_gld_sv_32_scaled<0b1001, "ldff1sw", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
   defm GLD1W_D : sve_mem_64b_gld_sv_32_scaled<0b1010, "ld1w", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
   defm GLDFF1W_D : sve_mem_64b_gld_sv_32_scaled<0b1011, "ldff1w", null_frag, null_frag, ZPR64ExtSXTW32, ZPR64ExtUXTW32, nxv2i32>;
   defm GLD1D : sve_mem_64b_gld_sv_32_scaled<0b1110, "ld1d", AArch64ld1_gather_sxtw_scaled, AArch64ld1_gather_uxtw_scaled, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>;
   defm GLDFF1D : sve_mem_64b_gld_sv_32_scaled<0b1111, "ldff1d", null_frag, null_frag, ZPR64ExtSXTW64, ZPR64ExtUXTW64, nxv2i64>;
 
   // Non-temporal contiguous loads (register + immediate)
   defm LDNT1B_ZRI : sve_mem_cldnt_si<0b00, "ldnt1b", Z_b, ZPR8>;
@@ -1047,6 +1055,13 @@ let Predicates = [HasSVE] in {
   def : InstAlias<"fcmlt $Zd, $Pg/z, $Zm, $Zn",
                   (FCMGT_PPzZZ_D PPR64:$Zd, PPR3bAny:$Pg, ZPR64:$Zn, ZPR64:$Zm), 0>;
 
+  def : Pat<(sext_inreg (nxv2i64 ZPR:$Zs), nxv2i32), (SXTW_ZPmZ_D (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg (nxv2i64 ZPR:$Zs), nxv2i16), (SXTH_ZPmZ_D (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg (nxv2i64 ZPR:$Zs), nxv2i8), (SXTB_ZPmZ_D (IMPLICIT_DEF), (PTRUE_D 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg (nxv4i32 ZPR:$Zs), nxv4i16), (SXTH_ZPmZ_S (IMPLICIT_DEF), (PTRUE_S 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg (nxv4i32 ZPR:$Zs), nxv4i8), (SXTB_ZPmZ_S (IMPLICIT_DEF), (PTRUE_S 31), ZPR:$Zs)>;
+  def : Pat<(sext_inreg (nxv8i16 ZPR:$Zs), nxv8i8), (SXTB_ZPmZ_H (IMPLICIT_DEF), (PTRUE_H 31), ZPR:$Zs)>;
+
   def : Pat<(nxv16i8 (bitconvert (nxv8i16 ZPR:$src))), (nxv16i8 ZPR:$src)>;
   def : Pat<(nxv16i8 (bitconvert (nxv4i32 ZPR:$src))), (nxv16i8 ZPR:$src)>;
   def : Pat<(nxv16i8 (bitconvert (nxv2i64 ZPR:$src))), (nxv16i8 ZPR:$src)>;
@@ -0,0 +1,76 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+; Verify that DAG combine rules for LD1 + sext/zext don't apply when the
+; result of LD1 has multiple uses
+
+define <vscale x 2 x i64> @no_dag_combine_zext_sext(<vscale x 2 x i1> %pg,
+                                                    <vscale x 2 x i64> %base,
+                                                    <vscale x 2 x i8>* %res_out,
+                                                    <vscale x 2 x i1> %pred) {
+; CHECK-LABEL: no_dag_combine_zext_sext
+; CHECK: ld1b { z1.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: mov w8, #255
+; CHECK-NEXT: mov z0.d, x8
+; CHECK-NEXT: and z0.d, z1.d, z0.d
+; CHECK-NEXT: st1b { z1.d }, p1, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                 <vscale x 2 x i64> %base,
+                                                                                 i64 16)
+  %res1 = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  %res2 = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
+                                      <vscale x 2 x i8> *%res_out,
+                                      i32 8,
+                                      <vscale x 2 x i1> %pred)
+
+  ret <vscale x 2 x i64> %res1
+}
+
+define <vscale x 2 x i64> @no_dag_combine_sext(<vscale x 2 x i1> %pg,
+                                               <vscale x 2 x i64> %base,
+                                               <vscale x 2 x i8>* %res_out,
+                                               <vscale x 2 x i1> %pred) {
+; CHECK-LABEL: no_dag_combine_sext
+; CHECK: ld1b { z1.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: ptrue p0.d
+; CHECK-NEXT: sxtb z0.d, p0/m, z1.d
+; CHECK-NEXT: st1b { z1.d }, p1, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                 <vscale x 2 x i64> %base,
+                                                                                 i64 16)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
+                                      <vscale x 2 x i8> *%res_out,
+                                      i32 8,
+                                      <vscale x 2 x i1> %pred)
+
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @no_dag_combine_zext(<vscale x 2 x i1> %pg,
+                                               <vscale x 2 x i64> %base,
+                                               <vscale x 2 x i8>* %res_out,
+                                               <vscale x 2 x i1> %pred) {
+; CHECK-LABEL: no_dag_combine_zext
+; CHECK: ld1b { z1.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: mov w8, #255
+; CHECK-NEXT: mov z0.d, x8
+; CHECK-NEXT: and z0.d, z1.d, z0.d
+; CHECK-NEXT: st1b { z1.d }, p1, [x0]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                 <vscale x 2 x i64> %base,
+                                                                                 i64 16)
+  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  call void @llvm.masked.store.nxv2i8(<vscale x 2 x i8> %load,
+                                      <vscale x 2 x i8> *%res_out,
+                                      i32 8,
+                                      <vscale x 2 x i1> %pred)
+
+  ret <vscale x 2 x i64> %res
+}
+
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
+declare void @llvm.masked.store.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>*, i32, <vscale x 2 x i1>)
@@ -10,9 +10,6 @@
 define <vscale x 4 x i32> @gld1h_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_uxtw_index:
 ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw #1]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                           i16* %base,
@@ -24,9 +21,6 @@ define <vscale x 4 x i32> @gld1h_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base,
 define <vscale x 4 x i32> @gld1h_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_sxtw_index:
 ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw #1]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                           i16* %base,
@@ -38,9 +32,6 @@ define <vscale x 4 x i32> @gld1h_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base,
 define <vscale x 2 x i64> @gld1h_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d_uxtw_index:
 ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw #1]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           i16* %base,
@@ -52,9 +43,6 @@ define <vscale x 2 x i64> @gld1h_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base,
 define <vscale x 2 x i64> @gld1h_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d_sxtw_index:
 ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw #1]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           i16* %base,
@@ -87,9 +75,6 @@ define <vscale x 4 x i32> @gld1w_s_sxtw_index(<vscale x 4 x i1> %pg, i32* %base,
 define <vscale x 2 x i64> @gld1w_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_d_uxtw_index:
 ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw #2]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           i32* %base,
@@ -101,9 +86,6 @@ define <vscale x 2 x i64> @gld1w_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base,
 define <vscale x 2 x i64> @gld1w_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_d_sxtw_index:
 ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw #2]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                           i32* %base,
@@ -173,14 +155,89 @@ define <vscale x 2 x double> @gld1d_sxtw_index_double(<vscale x 2 x i1> %pg, dou
   ret <vscale x 2 x double> %load
 }
 
-; LD1H
+;
+; LD1SH, LD1SW, LD1SD: base + 32-bit scaled offset, sign (sxtw) or zero (uxtw)
+; extended to 64 bits
+; e.g. ld1sh z0.d, p0/z, [x0, z0.d, uxtw #1]
+;
+
+; LD1SH
+define <vscale x 4 x i32> @gld1sh_s_uxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: gld1sh_s_uxtw_index:
+; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw #1]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                                          i16* %base,
+                                                                                          <vscale x 4 x i32> %b)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @gld1sh_s_sxtw_index(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: gld1sh_s_sxtw_index:
+; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw #1]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                                          i16* %base,
+                                                                                          <vscale x 4 x i32> %b)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @gld1sh_d_uxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sh_d_uxtw_index:
+; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw #1]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                          i16* %base,
+                                                                                          <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @gld1sh_d_sxtw_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sh_d_sxtw_index:
+; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw #1]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                          i16* %base,
+                                                                                          <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+; LD1SW
+define <vscale x 2 x i64> @gld1sw_d_uxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sw_d_uxtw_index:
+; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw #2]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                          i32* %base,
+                                                                                          <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @gld1sw_d_sxtw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sw_d_sxtw_index:
+; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw #2]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                          i32* %base,
+                                                                                          <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+
+; LD1H/LD1SH
 declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
 declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
 
 declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
 declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
 
-; LD1W
+; LD1W/LD1SW
 declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
 declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.index.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
@@ -10,9 +10,6 @@
 define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1b_s_uxtw:
 ; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                   i8* %base,
@@ -24,9 +21,6 @@ define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscal
 define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1b_s_sxtw:
 ; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                   i8* %base,
@@ -38,9 +32,6 @@ define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscal
 define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1b_d_uxtw:
 ; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                   i8* %base,
@@ -52,9 +43,6 @@ define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscal
 define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1b_d_sxtw:
 ; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                   i8* %base,
@@ -67,9 +55,6 @@ define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscal
 define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_uxtw:
 ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                     i16* %base,
@@ -81,9 +66,6 @@ define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vsca
 define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
 ; CHECK-LABEL: gld1h_s_sxtw:
 ; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                     i16* %base,
@@ -95,9 +77,6 @@ define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vsca
 define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d_uxtw:
 ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                     i16* %base,
@@ -109,9 +88,6 @@ define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vsca
 define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d_sxtw:
 ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                     i16* %base,
@@ -144,9 +120,6 @@ define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vsca
 define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_d_uxtw:
 ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                     i32* %base,
@@ -158,9 +131,6 @@ define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vsca
 define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_d_sxtw:
 ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                     i32* %base,
@@ -230,19 +200,138 @@ define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, double*
   ret <vscale x 2 x double> %load
 }
 
-; LD1B
+;
+; LD1SB, LD1SW, LD1SH: base + 32-bit unscaled offset, sign (sxtw) or zero
+; (uxtw) extended to 64 bits.
+; e.g. ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
+;
+
+; LD1SB
+define <vscale x 4 x i32> @gld1sb_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: gld1sb_s_uxtw:
+; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                                  i8* %base,
+                                                                                  <vscale x 4 x i32> %b)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @gld1sb_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
+; CHECK-LABEL: gld1sb_s_sxtw:
+; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                                  i8* %base,
+                                                                                  <vscale x 4 x i32> %b)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sb_d_uxtw:
+; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                  i8* %base,
+                                                                                  <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sb_d_sxtw:
+; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                  i8* %base,
+                                                                                  <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
; LD1SH
|
||||||
|
define <vscale x 4 x i32> @gld1sh_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
|
||||||
|
; CHECK-LABEL: gld1sh_s_uxtw:
|
||||||
|
; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
|
||||||
|
; CHECK-NEXT: ret
|
||||||
|
%load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
|
||||||
|
i16* %base,
|
||||||
|
<vscale x 4 x i32> %b)
|
||||||
|
%res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
|
||||||
|
ret <vscale x 4 x i32> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
define <vscale x 4 x i32> @gld1sh_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
|
||||||
|
; CHECK-LABEL: gld1sh_s_sxtw:
|
||||||
|
; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw]
|
||||||
|
; CHECK-NEXT: ret
|
||||||
|
%load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
|
||||||
|
i16* %base,
|
||||||
|
<vscale x 4 x i32> %b)
|
||||||
|
%res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
|
||||||
|
ret <vscale x 4 x i32> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
|
||||||
|
; CHECK-LABEL: gld1sh_d_uxtw:
|
||||||
|
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
|
||||||
|
; CHECK-NEXT: ret
|
||||||
|
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
|
||||||
|
i16* %base,
|
||||||
|
<vscale x 2 x i64> %b)
|
||||||
|
%res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
|
||||||
|
ret <vscale x 2 x i64> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
|
||||||
|
; CHECK-LABEL: gld1sh_d_sxtw:
|
||||||
|
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
|
||||||
|
; CHECK-NEXT: ret
|
||||||
|
%load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
|
||||||
|
i16* %base,
|
||||||
|
<vscale x 2 x i64> %b)
|
||||||
|
%res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
|
||||||
|
ret <vscale x 2 x i64> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
; LD1SW
|
||||||
|
define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
|
||||||
|
; CHECK-LABEL: gld1sw_d_uxtw:
|
||||||
|
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
|
||||||
|
; CHECK-NEXT: ret
|
||||||
|
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
|
||||||
|
i32* %base,
|
||||||
|
<vscale x 2 x i64> %b)
|
||||||
|
%res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
|
||||||
|
ret <vscale x 2 x i64> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
|
||||||
|
; CHECK-LABEL: gld1sw_d_sxtw:
|
||||||
|
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
|
||||||
|
; CHECK-NEXT: ret
|
||||||
|
%load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
|
||||||
|
i32* %base,
|
||||||
|
<vscale x 2 x i64> %b)
|
||||||
|
%res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
|
||||||
|
ret <vscale x 2 x i64> %res
|
||||||
|
}
|
||||||
|
|
||||||
|
; LD1B/LD1SB
|
||||||
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
|
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
|
||||||
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
|
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
|
||||||
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
|
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8.nxv4i32(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
|
||||||
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
|
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8.nxv2i64(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
|
||||||
|
|
||||||
; LD1H
|
; LD1H/LD1SH
|
||||||
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
|
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
|
||||||
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
|
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
|
||||||
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
|
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16.nxv4i32(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
|
||||||
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
|
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16.nxv2i64(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
|
||||||
|
|
||||||
; LD1W
|
; LD1W/LD1SW
|
||||||
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
|
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
|
||||||
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
|
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32.nxv2i64(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
|
||||||
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
|
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
|
||||||
|
|
|
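Note on the removal hunks above: the diff context stops at the second intrinsic argument, so the tail of each affected function is not visible here. Each of these tests zero-extends the gathered narrow elements back to the full element width, roughly as in the sketch below, which is illustrative only and not copied from the patch (element and offset types vary per test; the function name is hypothetical, the intrinsic is declared above). Because the unsigned gather already zero-fills each loaded element, the splat-mask and that used to implement the zext is redundant, which is what the deleted mov/and CHECK lines reflect.

; Illustrative sketch only; expected to select a single ld1h after this change.
define <vscale x 4 x i32> @sketch_zext_gather(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets) {
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %offsets)
  ; the zero-extend of the gathered i16s folds into the load, leaving no mov/and
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}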
@@ -8,9 +8,6 @@
 define <vscale x 2 x i64> @gld1h_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_index
 ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, lsl #1]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
                                                                              i16* %base,
@@ -22,9 +19,6 @@ define <vscale x 2 x i64> @gld1h_index(<vscale x 2 x i1> %pg, i16* %base, <vscal
 define <vscale x 2 x i64> @gld1w_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1w_index
 ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, lsl #2]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
                                                                              i32* %base,
@@ -53,6 +47,33 @@ define <vscale x 2 x double> @gld1d_index_double(<vscale x 2 x i1> %pg, double*
   ret <vscale x 2 x double> %load
 }
 
+;
+; LD1SH, LD1SW: base + 64-bit scaled offset
+; e.g. ld1sh z0.d, p0/z, [x0, z0.d, lsl #1]
+;
+
+define <vscale x 2 x i64> @gld1sh_index(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sh_index
+; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1> %pg,
+                                                                             i16* %base,
+                                                                             <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @gld1sw_index(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sw_index
+; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1> %pg,
+                                                                             i32* %base,
+                                                                             <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
 declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.index.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
 declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.index.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
 declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.index.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i64>)
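A gloss on the .index forms above (mine, not text from the patch): these intrinsics take element indices rather than byte offsets, so the selected instruction scales the index by the element size, which is what the lsl amounts in the CHECK lines encode.

; lane address = base + (index << 1) for halfword elements: ld1h/ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
; lane address = base + (index << 2) for word elements:     ld1w/ld1sw { z0.d }, p0/z, [x0, z0.d, lsl #2]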
@@ -8,9 +8,6 @@
 define <vscale x 2 x i64> @gld1b_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1b_d:
 ; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
                                                                      i8* %base,
@@ -22,9 +19,6 @@ define <vscale x 2 x i64> @gld1b_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2
 define <vscale x 2 x i64> @gld1h_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: gld1h_d:
 ; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
                                                                        i16* %base,
@@ -36,9 +30,6 @@ define <vscale x 2 x i64> @gld1h_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x
 define <vscale x 2 x i64> @gld1w_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
 ; CHECK-LABEL: gld1w_d:
 ; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
                                                                        i32* %base,
@@ -67,6 +58,44 @@ define <vscale x 2 x double> @gld1d_d_double(<vscale x 2 x i1> %pg, double* %bas
   ret <vscale x 2 x double> %load
 }
 
+;
+; LD1SB, LD1SW, LD1SH: base + 64-bit unscaled offset
+; e.g. ld1sh { z0.d }, p0/z, [x0, z0.d]
+;
+
+define <vscale x 2 x i64> @gld1sb_d(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sb_d:
+; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1> %pg,
+                                                                     i8* %base,
+                                                                     <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @gld1sh_d(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i64> %b) {
+; CHECK-LABEL: gld1sh_d:
+; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1> %pg,
+                                                                       i16* %base,
+                                                                       <vscale x 2 x i64> %b)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @gld1sw_d(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i64> %offsets) {
+; CHECK-LABEL: gld1sw_d:
+; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1> %pg,
+                                                                       i32* %base,
+                                                                       <vscale x 2 x i64> %offsets)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
 declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i64>)
 declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i64>)
 declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i64>)
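The RUN lines of these test files sit above the hunks shown here. Assuming they follow the usual pattern of the SVE intrinsics tests, each file is verified with something like the line below; treat the exact flags as an assumption rather than part of this patch.

; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s   ; assumed RUN line, not visible in these hunks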
@@ -9,9 +9,6 @@
 define <vscale x 4 x i32> @gld1b_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
 ; CHECK-LABEL: gld1b_s_imm:
 ; CHECK: ld1b { z0.s }, p0/z, [z0.s, #16]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                  <vscale x 4 x i32> %base,
@@ -23,9 +20,6 @@ define <vscale x 4 x i32> @gld1b_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 define <vscale x 2 x i64> @gld1b_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: gld1b_d_imm:
 ; CHECK: ld1b { z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT: mov w8, #255
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                  <vscale x 2 x i64> %base,
@@ -38,9 +32,6 @@ define <vscale x 2 x i64> @gld1b_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64>
 define <vscale x 4 x i32> @gld1h_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
 ; CHECK-LABEL: gld1h_s_imm:
 ; CHECK: ld1h { z0.s }, p0/z, [z0.s, #16]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.s, w8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
                                                                                    <vscale x 4 x i32> %base,
@@ -52,9 +43,6 @@ define <vscale x 4 x i32> @gld1h_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 define <vscale x 2 x i64> @gld1h_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: gld1h_d_imm:
 ; CHECK: ld1h { z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT: mov w8, #65535
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    <vscale x 2 x i64> %base,
@@ -77,9 +65,6 @@ define <vscale x 4 x i32> @gld1w_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32>
 define <vscale x 2 x i64> @gld1w_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
 ; CHECK-LABEL: gld1w_d_imm:
 ; CHECK: ld1w { z0.d }, p0/z, [z0.d, #16]
-; CHECK-NEXT: mov w8, #-1
-; CHECK-NEXT: mov z1.d, x8
-; CHECK-NEXT: and z0.d, z0.d, z1.d
 ; CHECK-NEXT: ret
   %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
                                                                                    <vscale x 2 x i64> %base,
@@ -119,15 +104,77 @@ define <vscale x 2 x double> @gld1d_d_imm_double(<vscale x 2 x i1> %pg, <vscale
   ret <vscale x 2 x double> %load
 }
 
-; LD1B
+; LD1SB, LD1SW, LD1SH: vector + immediate (index)
+; e.g. ld1sh { z0.s }, p0/z, [z0.s, #16]
+;
+
+; LD1SB
+define <vscale x 4 x i32> @gld1sb_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
+; CHECK-LABEL: gld1sb_s_imm:
+; CHECK: ld1sb { z0.s }, p0/z, [z0.s, #16]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                                 <vscale x 4 x i32> %base,
+                                                                                 i64 16)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @gld1sb_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
+; CHECK-LABEL: gld1sb_d_imm:
+; CHECK: ld1sb { z0.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                 <vscale x 2 x i64> %base,
+                                                                                 i64 16)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+; LD1SH
+define <vscale x 4 x i32> @gld1sh_s_imm(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %base) {
+; CHECK-LABEL: gld1sh_s_imm:
+; CHECK: ld1sh { z0.s }, p0/z, [z0.s, #16]
+; CHECK-NEXT: ret
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(<vscale x 4 x i1> %pg,
+                                                                                   <vscale x 4 x i32> %base,
+                                                                                   i64 16)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @gld1sh_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
+; CHECK-LABEL: gld1sh_d_imm:
+; CHECK: ld1sh { z0.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                   <vscale x 2 x i64> %base,
+                                                                                   i64 16)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+; LD1SW
+define <vscale x 2 x i64> @gld1sw_d_imm(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %base) {
+; CHECK-LABEL: gld1sw_d_imm:
+; CHECK: ld1sw { z0.d }, p0/z, [z0.d, #16]
+; CHECK-NEXT: ret
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(<vscale x 2 x i1> %pg,
+                                                                                   <vscale x 2 x i64> %base,
+                                                                                   i64 16)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+; LD1B/LD1SB
 declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv4i8.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
 declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.imm.nxv2i8.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
 
-; LD1H
+; LD1H/LD1SH
 declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv4i16.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
 declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.imm.nxv2i16.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
 
-; LD1W
+; LD1W/LD1SW
 declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv4i32.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, i64)
 declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.imm.nxv2i32.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, i64)
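Summary (mine rather than the patch's): taken together, the four test files touched above cover each gather addressing mode for the sign-extending loads, as exercised by the CHECK lines already shown.

; 32-bit unscaled offsets (sxtw/uxtw): ld1sb/ld1sh on .s and .d, ld1sw on .d, e.g. ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
; 64-bit scaled indices:               ld1sh/ld1sw on .d,                     e.g. ld1sh { z0.d }, p0/z, [x0, z0.d, lsl #1]
; 64-bit unscaled offsets:             ld1sb/ld1sh/ld1sw on .d,               e.g. ld1sh { z0.d }, p0/z, [x0, z0.d]
; vector base + immediate:             ld1sb/ld1sh on .s and .d, ld1sw on .d, e.g. ld1sh { z0.s }, p0/z, [z0.s, #16]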