Revert "[llvm][CodeGen] Addressing modes for SVE stN."
This reverts commit fc4e954ed5.
The reverted commit caused the following buildbot failure:
http://lab.llvm.org:8011/builders/clang-armv7-linux-build-cache/builds/29420
FAILED: lib/Target/AArch64/CMakeFiles/LLVMAArch64CodeGen.dir/AArch64ISelDAGToDAG.cpp.o
/usr/bin/c++ -DGTEST_HAS_RTTI=0 -D_DEBUG -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE -D_LARGEFILE_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS -D__STDC_LIMIT_MACROS -Ilib/Target/AArch64 -I/home/buildslave/buildslave/clang-armv7-linux-build-cache/llvm/llvm/lib/Target/AArch64 -I/usr/include/libxml2 -Iinclude -I/home/buildslave/buildslave/clang-armv7-linux-build-cache/llvm/llvm/include -mthumb -fPIC -fvisibility-inlines-hidden -Werror=date-time -Werror=unguarded-availability-new -Wall -Wextra -Wno-unused-parameter -Wwrite-strings -Wcast-qual -Wmissing-field-initializers -pedantic -Wno-long-long -Wimplicit-fallthrough -Wcovered-switch-default -Wno-noexcept-type -Wnon-virtual-dtor -Wdelete-non-virtual-dtor -Wstring-conversion -fdiagnostics-color -ffunction-sections -fdata-sections -O3 -fvisibility=hidden -fno-exceptions -fno-rtti -UNDEBUG -std=c++14 -MMD -MT lib/Target/AArch64/CMakeFiles/LLVMAArch64CodeGen.dir/AArch64ISelDAGToDAG.cpp.o -MF lib/Target/AArch64/CMakeFiles/LLVMAArch64CodeGen.dir/AArch64ISelDAGToDAG.cpp.o.d -o lib/Target/AArch64/CMakeFiles/LLVMAArch64CodeGen.dir/AArch64ISelDAGToDAG.cpp.o -c /home/buildslave/buildslave/clang-armv7-linux-build-cache/llvm/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp
/home/buildslave/buildslave/clang-armv7-linux-build-cache/llvm/llvm/lib/Target/AArch64/AArch64ISelDAGToDAG.cpp:1439:10: error: chosen constructor is explicit in copy-initialization
return {IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset};
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/usr/bin/../lib/gcc/arm-linux-gnueabihf/5.4.0/../../../../include/c++/5.4.0/tuple:479:19: note: explicit constructor declared here
constexpr tuple(_UElements&&... __elements)
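For context, the rejected line was `return {IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset};` in a function returning `std::tuple<unsigned, SDValue, SDValue>`. With the libstdc++ shipped with GCC 5.4 (used by this builder), std::tuple's converting constructor is unconditionally explicit (the conditionally-explicit behaviour of N4387 only landed in later releases), so copy-list-initializing the returned tuple from a braced list is ill-formed there even though newer standard libraries accept it. A minimal standalone sketch of the problem and the usual workaround — the stub type and function names below are hypothetical, not code from this commit:

    #include <tuple>

    // Stand-in for the real operand type (SDValue); any copyable type
    // reproduces the issue, since only the tuple constructor matters.
    struct SDValueStub {};

    std::tuple<unsigned, SDValueStub, SDValueStub>
    broken(unsigned Opc, SDValueStub Base, SDValueStub Offset) {
      // Rejected by clang against libstdc++ 5.4, because copy-list-
      // initialization cannot call the explicit tuple constructor:
      //   error: chosen constructor is explicit in copy-initialization
      return {Opc, Base, Offset};
    }

    std::tuple<unsigned, SDValueStub, SDValueStub>
    fixed(unsigned Opc, SDValueStub Base, SDValueStub Offset) {
      // std::make_tuple (or spelling out the tuple type) names the
      // constructor directly, which the older libstdc++ accepts.
      return std::make_tuple(Opc, Base, Offset);
    }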
parent 992fbce4e9
commit 17b1869b72
@@ -261,14 +261,7 @@ public:
   void SelectPostStore(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
   void SelectPostStoreLane(SDNode *N, unsigned NumVecs, unsigned Opc);
-  template <unsigned Scale>
-  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, const unsigned Opc_rr,
-                             const unsigned Opc_ri);
-  template <unsigned Scale>
-  std::tuple<unsigned, SDValue, SDValue>
-  findAddrModeSVELoadStore(SDNode *N, const unsigned Opc_rr,
-                           const unsigned Opc_ri, const SDValue &OldBase,
-                           const SDValue &OldOffset);
+  void SelectPredicatedStore(SDNode *N, unsigned NumVecs, const unsigned Opc);

   bool tryBitfieldExtractOp(SDNode *N);
   bool tryBitfieldExtractOpFromSExt(SDNode *N);
@@ -1415,30 +1408,6 @@ void AArch64DAGToDAGISel::SelectPostLoad(SDNode *N, unsigned NumVecs,
   CurDAG->RemoveDeadNode(N);
 }

-/// Optimize \param OldBase and \param OldOffset selecting the best addressing
-/// mode. Returns a tuple consisting of an Opcode, an SDValue representing the
-/// new Base and an SDValue representing the new offset.
-template <unsigned Scale>
-std::tuple<unsigned, SDValue, SDValue>
-AArch64DAGToDAGISel::findAddrModeSVELoadStore(SDNode *N, const unsigned Opc_rr,
-                                              const unsigned Opc_ri,
-                                              const SDValue &OldBase,
-                                              const SDValue &OldOffset) {
-  SDValue NewBase = OldBase;
-  SDValue NewOffset = OldOffset;
-  // Detect a possible Reg+Imm addressing mode.
-  const bool IsRegImm = SelectAddrModeIndexedSVE</*Min=*/-8, /*Max=*/7>(
-      N, OldBase, NewBase, NewOffset);
-
-  // Detect a possible reg+reg addressing mode, but only if we haven't already
-  // detected a Reg+Imm one.
-  const bool IsRegReg =
-      !IsRegImm && SelectSVERegRegAddrMode<Scale>(OldBase, NewBase, NewOffset);
-
-  // Select the instruction.
-  return {IsRegReg ? Opc_rr : Opc_ri, NewBase, NewOffset};
-}
-
 void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
                                       unsigned Opc) {
   SDLoc dl(N);
@@ -1459,27 +1428,18 @@ void AArch64DAGToDAGISel::SelectStore(SDNode *N, unsigned NumVecs,
   ReplaceNode(N, St);
 }

-template <unsigned Scale>
 void AArch64DAGToDAGISel::SelectPredicatedStore(SDNode *N, unsigned NumVecs,
-                                                const unsigned Opc_rr,
-                                                const unsigned Opc_ri) {
+                                                const unsigned Opc) {
   SDLoc dl(N);

   // Form a REG_SEQUENCE to force register allocation.
   SmallVector<SDValue, 4> Regs(N->op_begin() + 2, N->op_begin() + 2 + NumVecs);
   SDValue RegSeq = createZTuple(Regs);

-  // Optimize addressing mode.
-  unsigned Opc;
-  SDValue Offset, Base;
-  std::tie(Opc, Base, Offset) = findAddrModeSVELoadStore<Scale>(
-      N, Opc_rr, Opc_ri, N->getOperand(NumVecs + 3),
-      CurDAG->getTargetConstant(0, dl, MVT::i64));
-
-  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2), // predicate
-                   Base,                               // address
-                   Offset,                             // offset
-                   N->getOperand(0)};                  // chain
+  SDValue Ops[] = {RegSeq, N->getOperand(NumVecs + 2),         // predicate
+                   N->getOperand(NumVecs + 3),                 // address
+                   CurDAG->getTargetConstant(0, dl, MVT::i64), // offset
+                   N->getOperand(0)};                          // chain
   SDNode *St = CurDAG->getMachineNode(Opc, dl, N->getValueType(0), Ops);

   ReplaceNode(N, St);
@@ -3950,60 +3910,48 @@ void AArch64DAGToDAGISel::Select(SDNode *Node) {
    }
    case Intrinsic::aarch64_sve_st2: {
      if (VT == MVT::nxv16i8) {
-       SelectPredicatedStore</*Scale=*/0>(Node, 2, AArch64::ST2B,
-                                          AArch64::ST2B_IMM);
+       SelectPredicatedStore(Node, 2, AArch64::ST2B_IMM);
        return;
      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-       SelectPredicatedStore</*Scale=*/1>(Node, 2, AArch64::ST2H,
-                                          AArch64::ST2H_IMM);
+       SelectPredicatedStore(Node, 2, AArch64::ST2H_IMM);
        return;
      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-       SelectPredicatedStore</*Scale=*/2>(Node, 2, AArch64::ST2W,
-                                          AArch64::ST2W_IMM);
+       SelectPredicatedStore(Node, 2, AArch64::ST2W_IMM);
        return;
      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-       SelectPredicatedStore</*Scale=*/3>(Node, 2, AArch64::ST2D,
-                                          AArch64::ST2D_IMM);
+       SelectPredicatedStore(Node, 2, AArch64::ST2D_IMM);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_sve_st3: {
      if (VT == MVT::nxv16i8) {
-       SelectPredicatedStore</*Scale=*/0>(Node, 3, AArch64::ST3B,
-                                          AArch64::ST3B_IMM);
+       SelectPredicatedStore(Node, 3, AArch64::ST3B_IMM);
        return;
      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-       SelectPredicatedStore</*Scale=*/1>(Node, 3, AArch64::ST3H,
-                                          AArch64::ST3H_IMM);
+       SelectPredicatedStore(Node, 3, AArch64::ST3H_IMM);
        return;
      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-       SelectPredicatedStore</*Scale=*/2>(Node, 3, AArch64::ST3W,
-                                          AArch64::ST3W_IMM);
+       SelectPredicatedStore(Node, 3, AArch64::ST3W_IMM);
        return;
      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-       SelectPredicatedStore</*Scale=*/3>(Node, 3, AArch64::ST3D,
-                                          AArch64::ST3D_IMM);
+       SelectPredicatedStore(Node, 3, AArch64::ST3D_IMM);
        return;
      }
      break;
    }
    case Intrinsic::aarch64_sve_st4: {
      if (VT == MVT::nxv16i8) {
-       SelectPredicatedStore</*Scale=*/0>(Node, 4, AArch64::ST4B,
-                                          AArch64::ST4B_IMM);
+       SelectPredicatedStore(Node, 4, AArch64::ST4B_IMM);
        return;
      } else if (VT == MVT::nxv8i16 || VT == MVT::nxv8f16) {
-       SelectPredicatedStore</*Scale=*/1>(Node, 4, AArch64::ST4H,
-                                          AArch64::ST4H_IMM);
+       SelectPredicatedStore(Node, 4, AArch64::ST4H_IMM);
        return;
      } else if (VT == MVT::nxv4i32 || VT == MVT::nxv4f32) {
-       SelectPredicatedStore</*Scale=*/2>(Node, 4, AArch64::ST4W,
-                                          AArch64::ST4W_IMM);
+       SelectPredicatedStore(Node, 4, AArch64::ST4W_IMM);
        return;
      } else if (VT == MVT::nxv2i64 || VT == MVT::nxv2f64) {
-       SelectPredicatedStore</*Scale=*/3>(Node, 4, AArch64::ST4D,
-                                          AArch64::ST4D_IMM);
+       SelectPredicatedStore(Node, 4, AArch64::ST4D_IMM);
        return;
      }
      break;
@@ -4639,9 +4587,6 @@ static EVT getMemVTFromNode(LLVMContext &Ctx, SDNode *Root) {
   if (isa<MemSDNode>(Root))
     return cast<MemSDNode>(Root)->getMemoryVT();

-  if (isa<MemIntrinsicSDNode>(Root))
-    return cast<MemIntrinsicSDNode>(Root)->getMemoryVT();
-
   const unsigned Opcode = Root->getOpcode();
   // For custom ISD nodes, we have to look at them individually to extract the
   // type of the data moved to/from memory.
@@ -8913,30 +8913,6 @@ SDValue AArch64TargetLowering::LowerVSCALE(SDValue Op,
                        DL, VT);
 }

-/// Set the IntrinsicInfo for the `aarch64_sve_st<N>` intrinsics.
-template <unsigned NumVecs>
-static bool setInfoSVEStN(AArch64TargetLowering::IntrinsicInfo &Info,
-                          const CallInst &CI) {
-  Info.opc = ISD::INTRINSIC_VOID;
-  // Retrieve EC from first vector argument.
-  const EVT VT = EVT::getEVT(CI.getArgOperand(0)->getType());
-  ElementCount EC = VT.getVectorElementCount();
-#ifndef NDEBUG
-  // Check the assumption that all input vectors are the same type.
-  for (unsigned I = 0; I < NumVecs; ++I)
-    assert(VT == EVT::getEVT(CI.getArgOperand(I)->getType()) &&
-           "Invalid type.");
-#endif
-  // memVT is `NumVecs * VT`.
-  Info.memVT = EVT::getVectorVT(CI.getType()->getContext(), VT.getScalarType(),
-                                EC * NumVecs);
-  Info.ptrVal = CI.getArgOperand(CI.getNumArgOperands() - 1);
-  Info.offset = 0;
-  Info.align.reset();
-  Info.flags = MachineMemOperand::MOStore;
-  return true;
-}
-
 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as
 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment
 /// specified in the intrinsic calls.
@@ -8946,12 +8922,6 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                unsigned Intrinsic) const {
   auto &DL = I.getModule()->getDataLayout();
   switch (Intrinsic) {
-  case Intrinsic::aarch64_sve_st2:
-    return setInfoSVEStN<2>(Info, I);
-  case Intrinsic::aarch64_sve_st3:
-    return setInfoSVEStN<3>(Info, I);
-  case Intrinsic::aarch64_sve_st4:
-    return setInfoSVEStN<4>(Info, I);
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
@@ -1,614 +0,0 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
-
-; NOTE: invalid, upper and lower bound immediate values of the reg+imm
-; addressing mode are checked only for the byte version of each
-; instruction (`st<N>b`), as the code for detecting the immediate is
-; common to all instructions, and varies only for the number of
-; elements of the structured store, which is <N> = 2, 3, 4.
-
-;
-; ST2B
-;
-
-define void @st2b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st2b_i8_valid_imm:
-; CHECK: st2b { z0.b, z1.b }, p0, [x0, #2, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 2
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st2b_i8_invalid_imm_not_multiple_of_2(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st2b_i8_invalid_imm_not_multiple_of_2:
-; CHECK: rdvl x[[N:[0-9]+]], #3
-; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st2b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st2b_i8_invalid_imm_out_of_lower_bound:
-; CHECK: rdvl x[[N:[0-9]+]], #-18
-; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -18
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st2b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st2b_i8_invalid_imm_out_of_upper_bound:
-; CHECK: rdvl x[[N:[0-9]+]], #16
-; CHECK-NEXT: st2b { z0.b, z1.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 16
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st2b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st2b_i8_valid_imm_lower_bound:
-; CHECK: st2b { z0.b, z1.b }, p0, [x0, #-16, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -16
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st2b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st2b_i8_valid_imm_upper_bound:
-; CHECK: st2b { z0.b, z1.b }, p0, [x0, #14, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 14
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-;
-; ST2H
-;
-
-define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
-; CHECK-LABEL: st2h_i16:
-; CHECK: st2h { z0.h, z1.h }, p0, [x0, #2, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 2
-  call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0,
-                                          <vscale x 8 x i16> %v1,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x i16>* %base)
-  ret void
-}
-
-define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
-; CHECK-LABEL: st2h_f16:
-; CHECK: st2h { z0.h, z1.h }, p0, [x0, #2, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 2
-  call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0,
-                                          <vscale x 8 x half> %v1,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x half>* %base)
-  ret void
-}
-
-;
-; ST2W
-;
-
-define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
-; CHECK-LABEL: st2w_i32:
-; CHECK: st2w { z0.s, z1.s }, p0, [x0, #4, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 4
-  call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0,
-                                          <vscale x 4 x i32> %v1,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x i32>* %base)
-  ret void
-}
-
-define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
-; CHECK-LABEL: st2w_f32:
-; CHECK: st2w { z0.s, z1.s }, p0, [x0, #6, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 6
-  call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0,
-                                          <vscale x 4 x float> %v1,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x float>* %base)
-  ret void
-}
-
-;
-; ST2D
-;
-
-define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
-; CHECK-LABEL: st2d_i64:
-; CHECK: st2d { z0.d, z1.d }, p0, [x0, #8, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 8
-  call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0,
-                                          <vscale x 2 x i64> %v1,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x i64>* %base)
-  ret void
-}
-
-define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
-; CHECK-LABEL: st2d_f64:
-; CHECK: st2d { z0.d, z1.d }, p0, [x0, #10, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 10
-  call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0,
-                                          <vscale x 2 x double> %v1,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x double>* %base)
-  ret void
-}
-
-;
-; ST3B
-;
-
-define void @st3b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_valid_imm:
-; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #3, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 3
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st3b_i8_invalid_imm_not_multiple_of_3_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_invalid_imm_not_multiple_of_3_01:
-; CHECK: rdvl x[[N:[0-9]+]], #4
-; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st3b_i8_invalid_imm_not_multiple_of_3_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_invalid_imm_not_multiple_of_3_02:
-; CHECK: rdvl x[[N:[0-9]+]], #5
-; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st3b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_invalid_imm_out_of_lower_bound:
-; CHECK: rdvl x[[N:[0-9]+]], #-27
-; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -27
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st3b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_invalid_imm_out_of_upper_bound:
-; CHECK: rdvl x[[N:[0-9]+]], #24
-; CHECK-NEXT: st3b { z0.b, z1.b, z2.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 24
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st3b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_valid_imm_lower_bound:
-; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #-24, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -24
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st3b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st3b_i8_valid_imm_upper_bound:
-; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, #21, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 21
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-;
-; ST3H
-;
-
-define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
-; CHECK-LABEL: st3h_i16:
-; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, #6, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 6
-  call void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16> %v0,
-                                          <vscale x 8 x i16> %v1,
-                                          <vscale x 8 x i16> %v2,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x i16>* %base)
-  ret void
-}
-
-define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
-; CHECK-LABEL: st3h_f16:
-; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, #9, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 9
-  call void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half> %v0,
-                                          <vscale x 8 x half> %v1,
-                                          <vscale x 8 x half> %v2,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x half>* %base)
-  ret void
-}
-
-;
-; ST3W
-;
-
-define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
-; CHECK-LABEL: st3w_i32:
-; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, #12, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 12
-  call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> %v0,
-                                          <vscale x 4 x i32> %v1,
-                                          <vscale x 4 x i32> %v2,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x i32>* %base)
-  ret void
-}
-
-define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
-; CHECK-LABEL: st3w_f32:
-; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, #15, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 15
-  call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> %v0,
-                                          <vscale x 4 x float> %v1,
-                                          <vscale x 4 x float> %v2,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x float>* %base)
-  ret void
-}
-
-;
-; ST3D
-;
-
-define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
-; CHECK-LABEL: st3d_i64:
-; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, #18, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 18
-  call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> %v0,
-                                          <vscale x 2 x i64> %v1,
-                                          <vscale x 2 x i64> %v2,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x i64>* %base)
-  ret void
-}
-
-define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
-; CHECK-LABEL: st3d_f64:
-; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, #-3, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 -3
-  call void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double> %v0,
-                                          <vscale x 2 x double> %v1,
-                                          <vscale x 2 x double> %v2,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x double>* %base)
-  ret void
-}
-
-;
-; ST4B
-;
-
-define void @st4b_i8_valid_imm(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_valid_imm:
-; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #4, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 4
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_invalid_imm_not_multiple_of_4_01(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_01:
-; CHECK: rdvl x[[N:[0-9]+]], #5
-; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 5
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_invalid_imm_not_multiple_of_4_02(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_02:
-; CHECK: rdvl x[[N:[0-9]+]], #6
-; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 6
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_invalid_imm_not_multiple_of_4_03(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_invalid_imm_not_multiple_of_4_03:
-; CHECK: rdvl x[[N:[0-9]+]], #7
-; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[N]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 7
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_invalid_imm_out_of_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_invalid_imm_out_of_lower_bound:
-; FIXME: optimize OFFSET computation so that xOFFSET = (mul (RDVL #4) #9)
-; xM = -9 * 2^6
-; xP = RDVL * 2^-4
-; xBASE = RDVL * 2^-4 * -9 * 2^6 = RDVL * -36
-; CHECK: rdvl x[[N:[0-9]+]], #1
-; CHECK-DAG: mov x[[M:[0-9]+]], #-576
-; CHECK-DAG: lsr x[[P:[0-9]+]], x[[N]], #4
-; CHECK-DAG: mul x[[OFFSET:[0-9]+]], x[[P]], x[[M]]
-; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[OFFSET]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -36
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_invalid_imm_out_of_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_invalid_imm_out_of_upper_bound:
-; FIXME: optimize OFFSET computation so that xOFFSET = (shl (RDVL #16) #1)
-; xM = 2^9
-; xP = RDVL * 2^-4
-; xOFFSET = RDVL * 2^-4 * 2^9 = RDVL * 32
-; CHECK: rdvl x[[N:[0-9]+]], #1
-; CHECK-DAG: mov w[[M:[0-9]+]], #512
-; CHECK-DAG: lsr x[[P:[0-9]+]], x[[N]], #4
-; CHECK-DAG: mul x[[OFFSET:[0-9]+]], x[[P]], x[[M]]
-; CHECK-NEXT: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x[[OFFSET]]]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 32
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_valid_imm_lower_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_valid_imm_lower_bound:
-; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #-32, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 -32
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-define void @st4b_i8_valid_imm_upper_bound(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, <vscale x 16 x i8>* %addr) {
-; CHECK-LABEL: st4b_i8_valid_imm_upper_bound:
-; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, #28, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %addr, i64 28
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-;
-; ST4H
-;
-
-define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x i16>* %addr) {
-; CHECK-LABEL: st4h_i16:
-; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, #8, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %addr, i64 8
-  call void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16> %v0,
-                                          <vscale x 8 x i16> %v1,
-                                          <vscale x 8 x i16> %v2,
-                                          <vscale x 8 x i16> %v3,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x i16>* %base)
-  ret void
-}
-
-define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, <vscale x 8 x half>* %addr) {
-; CHECK-LABEL: st4h_f16:
-; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, #12, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %addr, i64 12
-  call void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half> %v0,
-                                          <vscale x 8 x half> %v1,
-                                          <vscale x 8 x half> %v2,
-                                          <vscale x 8 x half> %v3,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x half>* %base)
-  ret void
-}
-
-;
-; ST4W
-;
-
-define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x i32>* %addr) {
-; CHECK-LABEL: st4w_i32:
-; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, #16, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %addr, i64 16
-  call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> %v0,
-                                          <vscale x 4 x i32> %v1,
-                                          <vscale x 4 x i32> %v2,
-                                          <vscale x 4 x i32> %v3,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x i32>* %base)
-  ret void
-}
-
-define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, <vscale x 4 x float>* %addr) {
-; CHECK-LABEL: st4w_f32:
-; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, #20, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %addr, i64 20
-  call void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float> %v0,
-                                          <vscale x 4 x float> %v1,
-                                          <vscale x 4 x float> %v2,
-                                          <vscale x 4 x float> %v3,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x float>* %base)
-  ret void
-}
-
-;
-; ST4D
-;
-
-define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x i64>* %addr) {
-; CHECK-LABEL: st4d_i64:
-; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, #24, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %addr, i64 24
-  call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> %v0,
-                                          <vscale x 2 x i64> %v1,
-                                          <vscale x 2 x i64> %v2,
-                                          <vscale x 2 x i64> %v3,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x i64>* %base)
-  ret void
-}
-
-define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, <vscale x 2 x double>* %addr) {
-; CHECK-LABEL: st4d_f64:
-; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, #28, mul vl]
-; CHECK-NEXT: ret
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %addr, i64 28
-  call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> %v0,
-                                          <vscale x 2 x double> %v1,
-                                          <vscale x 2 x double> %v2,
-                                          <vscale x 2 x double> %v3,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x double>* %base)
-  ret void
-}
-
-declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
-declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
-declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
-declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
-declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
-declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
-declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
-
-declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
-declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
-declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
-declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
-declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
-declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
-declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
-
-declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
-declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
-declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
-declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
-declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
-declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
-declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
@@ -1,367 +0,0 @@
-; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve -asm-verbose=0 < %s | FileCheck %s
-
-;
-; ST2B
-;
-
-define void @st2b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
-; CHECK-LABEL: st2b_i8:
-; CHECK: st2b { z0.b, z1.b }, p0, [x0, x1]
-; CHECK-NEXT: ret
-  %1 = getelementptr i8, i8* %addr, i64 %offset
-  %base = bitcast i8* %1 to <vscale x 16 x i8>*
-  call void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-;
-; ST2H
-;
-
-define void @st2h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
-; CHECK-LABEL: st2h_i16:
-; CHECK: st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1]
-; CHECK-NEXT: ret
-  %1 = getelementptr i16, i16* %addr, i64 %offset
-  %base = bitcast i16* %1 to <vscale x 8 x i16>*
-  call void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16> %v0,
-                                          <vscale x 8 x i16> %v1,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x i16>* %base)
-  ret void
-}
-
-define void @st2h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
-; CHECK-LABEL: st2h_f16:
-; CHECK: st2h { z0.h, z1.h }, p0, [x0, x1, lsl #1]
-; CHECK-NEXT: ret
-  %1 = getelementptr half, half* %addr, i64 %offset
-  %base = bitcast half* %1 to <vscale x 8 x half>*
-  call void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half> %v0,
-                                          <vscale x 8 x half> %v1,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x half>* %base)
-  ret void
-}
-
-;
-; ST2W
-;
-
-define void @st2w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
-; CHECK-LABEL: st2w_i32:
-; CHECK: st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2]
-; CHECK-NEXT: ret
-  %1 = getelementptr i32, i32* %addr, i64 %offset
-  %base = bitcast i32* %1 to <vscale x 4 x i32>*
-  call void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32> %v0,
-                                          <vscale x 4 x i32> %v1,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x i32>* %base)
-  ret void
-}
-
-define void @st2w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
-; CHECK-LABEL: st2w_f32:
-; CHECK: st2w { z0.s, z1.s }, p0, [x0, x1, lsl #2]
-; CHECK-NEXT: ret
-  %1 = getelementptr float, float* %addr, i64 %offset
-  %base = bitcast float* %1 to <vscale x 4 x float>*
-  call void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float> %v0,
-                                          <vscale x 4 x float> %v1,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x float>* %base)
-  ret void
-}
-
-;
-; ST2D
-;
-
-define void @st2d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
-; CHECK-LABEL: st2d_i64:
-; CHECK: st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3]
-; CHECK-NEXT: ret
-  %1 = getelementptr i64, i64* %addr, i64 %offset
-  %base = bitcast i64* %1 to <vscale x 2 x i64>*
-  call void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64> %v0,
-                                          <vscale x 2 x i64> %v1,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x i64>* %base)
-  ret void
-}
-
-define void @st2d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
-; CHECK-LABEL: st2d_f64:
-; CHECK: st2d { z0.d, z1.d }, p0, [x0, x1, lsl #3]
-; CHECK-NEXT: ret
-  %1 = getelementptr double, double* %addr, i64 %offset
-  %base = bitcast double* %1 to <vscale x 2 x double>*
-  call void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double> %v0,
-                                          <vscale x 2 x double> %v1,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x double>* %base)
-  ret void
-}
-
-;
-; ST3B
-;
-
-define void @st3b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
-; CHECK-LABEL: st3b_i8:
-; CHECK: st3b { z0.b, z1.b, z2.b }, p0, [x0, x1]
-; CHECK-NEXT: ret
-  %1 = getelementptr i8, i8* %addr, i64 %offset
-  %base = bitcast i8* %1 to <vscale x 16 x i8>*
-  call void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-;
-; ST3H
-;
-
-define void @st3h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
-; CHECK-LABEL: st3h_i16:
-; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, x1, lsl #1]
-; CHECK-NEXT: ret
-  %1 = getelementptr i16, i16* %addr, i64 %offset
-  %base = bitcast i16* %1 to <vscale x 8 x i16>*
-  call void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16> %v0,
-                                          <vscale x 8 x i16> %v1,
-                                          <vscale x 8 x i16> %v2,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x i16>* %base)
-  ret void
-}
-
-define void @st3h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
-; CHECK-LABEL: st3h_f16:
-; CHECK: st3h { z0.h, z1.h, z2.h }, p0, [x0, x1, lsl #1]
-; CHECK-NEXT: ret
-  %1 = getelementptr half, half* %addr, i64 %offset
-  %base = bitcast half* %1 to <vscale x 8 x half>*
-  call void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half> %v0,
-                                          <vscale x 8 x half> %v1,
-                                          <vscale x 8 x half> %v2,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x half>* %base)
-  ret void
-}
-
-;
-; ST3W
-;
-
-define void @st3w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
-; CHECK-LABEL: st3w_i32:
-; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, x1, lsl #2]
-; CHECK-NEXT: ret
-  %1 = getelementptr i32, i32* %addr, i64 %offset
-  %base = bitcast i32* %1 to <vscale x 4 x i32>*
-  call void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32> %v0,
-                                          <vscale x 4 x i32> %v1,
-                                          <vscale x 4 x i32> %v2,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x i32>* %base)
-  ret void
-}
-
-define void @st3w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
-; CHECK-LABEL: st3w_f32:
-; CHECK: st3w { z0.s, z1.s, z2.s }, p0, [x0, x1, lsl #2]
-; CHECK-NEXT: ret
-  %1 = getelementptr float, float* %addr, i64 %offset
-  %base = bitcast float* %1 to <vscale x 4 x float>*
-  call void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float> %v0,
-                                          <vscale x 4 x float> %v1,
-                                          <vscale x 4 x float> %v2,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x float>* %base)
-  ret void
-}
-
-;
-; ST3D
-;
-
-define void @st3d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
-; CHECK-LABEL: st3d_i64:
-; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, x1, lsl #3]
-; CHECK-NEXT: ret
-  %1 = getelementptr i64, i64* %addr, i64 %offset
-  %base = bitcast i64* %1 to <vscale x 2 x i64>*
-  call void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64> %v0,
-                                          <vscale x 2 x i64> %v1,
-                                          <vscale x 2 x i64> %v2,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x i64>* %base)
-  ret void
-}
-
-define void @st3d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
-; CHECK-LABEL: st3d_f64:
-; CHECK: st3d { z0.d, z1.d, z2.d }, p0, [x0, x1, lsl #3]
-; CHECK-NEXT: ret
-  %1 = getelementptr double, double* %addr, i64 %offset
-  %base = bitcast double* %1 to <vscale x 2 x double>*
-  call void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double> %v0,
-                                          <vscale x 2 x double> %v1,
-                                          <vscale x 2 x double> %v2,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x double>* %base)
-  ret void
-}
-
-;
-; ST4B
-;
-
-define void @st4b_i8(<vscale x 16 x i8> %v0, <vscale x 16 x i8> %v1, <vscale x 16 x i8> %v2, <vscale x 16 x i8> %v3, <vscale x 16 x i1> %pred, i8* %addr, i64 %offset) {
-; CHECK-LABEL: st4b_i8:
-; CHECK: st4b { z0.b, z1.b, z2.b, z3.b }, p0, [x0, x1]
-; CHECK-NEXT: ret
-  %1 = getelementptr i8, i8* %addr, i64 %offset
-  %base = bitcast i8* %1 to <vscale x 16 x i8>*
-  call void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8> %v0,
-                                          <vscale x 16 x i8> %v1,
-                                          <vscale x 16 x i8> %v2,
-                                          <vscale x 16 x i8> %v3,
-                                          <vscale x 16 x i1> %pred,
-                                          <vscale x 16 x i8>* %base)
-  ret void
-}
-
-;
-; ST4H
-;
-
-define void @st4h_i16(<vscale x 8 x i16> %v0, <vscale x 8 x i16> %v1, <vscale x 8 x i16> %v2, <vscale x 8 x i16> %v3, <vscale x 8 x i1> %pred, i16* %addr, i64 %offset) {
-; CHECK-LABEL: st4h_i16:
-; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, x1, lsl #1]
-; CHECK-NEXT: ret
-  %1 = getelementptr i16, i16* %addr, i64 %offset
-  %base = bitcast i16* %1 to <vscale x 8 x i16>*
-  call void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16> %v0,
-                                          <vscale x 8 x i16> %v1,
-                                          <vscale x 8 x i16> %v2,
-                                          <vscale x 8 x i16> %v3,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x i16>* %base)
-  ret void
-}
-
-define void @st4h_f16(<vscale x 8 x half> %v0, <vscale x 8 x half> %v1, <vscale x 8 x half> %v2, <vscale x 8 x half> %v3, <vscale x 8 x i1> %pred, half* %addr, i64 %offset) {
-; CHECK-LABEL: st4h_f16:
-; CHECK: st4h { z0.h, z1.h, z2.h, z3.h }, p0, [x0, x1, lsl #1]
-; CHECK-NEXT: ret
-  %1 = getelementptr half, half* %addr, i64 %offset
-  %base = bitcast half* %1 to <vscale x 8 x half>*
-  call void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half> %v0,
-                                          <vscale x 8 x half> %v1,
-                                          <vscale x 8 x half> %v2,
-                                          <vscale x 8 x half> %v3,
-                                          <vscale x 8 x i1> %pred,
-                                          <vscale x 8 x half>* %base)
-  ret void
-}
-
-;
-; ST4W
-;
-
-define void @st4w_i32(<vscale x 4 x i32> %v0, <vscale x 4 x i32> %v1, <vscale x 4 x i32> %v2, <vscale x 4 x i32> %v3, <vscale x 4 x i1> %pred, i32* %addr, i64 %offset) {
-; CHECK-LABEL: st4w_i32:
-; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, x1, lsl #2]
-; CHECK-NEXT: ret
-  %1 = getelementptr i32, i32* %addr, i64 %offset
-  %base = bitcast i32* %1 to <vscale x 4 x i32>*
-  call void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32> %v0,
-                                          <vscale x 4 x i32> %v1,
-                                          <vscale x 4 x i32> %v2,
-                                          <vscale x 4 x i32> %v3,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x i32>* %base)
-  ret void
-}
-
-define void @st4w_f32(<vscale x 4 x float> %v0, <vscale x 4 x float> %v1, <vscale x 4 x float> %v2, <vscale x 4 x float> %v3, <vscale x 4 x i1> %pred, float* %addr, i64 %offset) {
-; CHECK-LABEL: st4w_f32:
-; CHECK: st4w { z0.s, z1.s, z2.s, z3.s }, p0, [x0, x1, lsl #2]
-; CHECK-NEXT: ret
-  %1 = getelementptr float, float* %addr, i64 %offset
-  %base = bitcast float* %1 to <vscale x 4 x float>*
-  call void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float> %v0,
-                                          <vscale x 4 x float> %v1,
-                                          <vscale x 4 x float> %v2,
-                                          <vscale x 4 x float> %v3,
-                                          <vscale x 4 x i1> %pred,
-                                          <vscale x 4 x float>* %base)
-  ret void
-}
-
-;
-; ST4D
-;
-
-define void @st4d_i64(<vscale x 2 x i64> %v0, <vscale x 2 x i64> %v1, <vscale x 2 x i64> %v2, <vscale x 2 x i64> %v3, <vscale x 2 x i1> %pred, i64* %addr, i64 %offset) {
-; CHECK-LABEL: st4d_i64:
-; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, x1, lsl #3]
-; CHECK-NEXT: ret
-  %1 = getelementptr i64, i64* %addr, i64 %offset
-  %base = bitcast i64* %1 to <vscale x 2 x i64>*
-  call void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64> %v0,
-                                          <vscale x 2 x i64> %v1,
-                                          <vscale x 2 x i64> %v2,
-                                          <vscale x 2 x i64> %v3,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x i64>* %base)
-  ret void
-}
-
-define void @st4d_f64(<vscale x 2 x double> %v0, <vscale x 2 x double> %v1, <vscale x 2 x double> %v2, <vscale x 2 x double> %v3, <vscale x 2 x i1> %pred, double* %addr, i64 %offset) {
-; CHECK-LABEL: st4d_f64:
-; CHECK: st4d { z0.d, z1.d, z2.d, z3.d }, p0, [x0, x1, lsl #3]
-; CHECK-NEXT: ret
-  %1 = getelementptr double, double* %addr, i64 %offset
-  %base = bitcast double* %1 to <vscale x 2 x double>*
-  call void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double> %v0,
-                                          <vscale x 2 x double> %v1,
-                                          <vscale x 2 x double> %v2,
-                                          <vscale x 2 x double> %v3,
-                                          <vscale x 2 x i1> %pred,
-                                          <vscale x 2 x double>* %base)
-  ret void
-}
-
-declare void @llvm.aarch64.sve.st2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
-declare void @llvm.aarch64.sve.st2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
-declare void @llvm.aarch64.sve.st2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
-declare void @llvm.aarch64.sve.st2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
-declare void @llvm.aarch64.sve.st2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
-declare void @llvm.aarch64.sve.st2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
-declare void @llvm.aarch64.sve.st2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
-
-declare void @llvm.aarch64.sve.st3.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
-declare void @llvm.aarch64.sve.st3.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
-declare void @llvm.aarch64.sve.st3.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
-declare void @llvm.aarch64.sve.st3.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
-declare void @llvm.aarch64.sve.st3.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
-declare void @llvm.aarch64.sve.st3.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
-declare void @llvm.aarch64.sve.st3.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)
-
-declare void @llvm.aarch64.sve.st4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, <vscale x 16 x i8>*)
-declare void @llvm.aarch64.sve.st4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i1>, <vscale x 8 x i16>*)
-declare void @llvm.aarch64.sve.st4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i1>, <vscale x 4 x i32>*)
-declare void @llvm.aarch64.sve.st4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, <vscale x 2 x i64>*)
-declare void @llvm.aarch64.sve.st4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x i1>, <vscale x 8 x half>*)
-declare void @llvm.aarch64.sve.st4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x i1>, <vscale x 4 x float>*)
-declare void @llvm.aarch64.sve.st4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x i1>, <vscale x 2 x double>*)