[AArch64][SVE] Add addressing mode for contiguous loads & stores
Summary:
This patch adds the register + register addressing mode for SVE contiguous
load and store intrinsics (LD1 & ST1).

Reviewers: sdesmalen, fpetrogalli, efriedma, rengolin

Reviewed By: fpetrogalli

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, danielkiss, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D78509
This commit is contained in:
parent 1811061c38
commit 0df40d6ef8
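In short: after this change, a contiguous load whose address is a base pointer plus a scalar index is selected straight to the reg + reg form, instead of materialising the address with a separate add. A minimal sketch, mirroring the new tests added below (the function name is illustrative):

  define <vscale x 16 x i8> @example_ld1b(<vscale x 16 x i1> %pg, i8* %a, i64 %index) {
    ; With the new reg + reg pattern this selects to: ld1b { z0.b }, p0/z, [x0, x1]
    %base = getelementptr i8, i8* %a, i64 %index
    %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
    ret <vscale x 16 x i8> %load
  }
  declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)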
@@ -1568,7 +1568,53 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
 defm Pat_Load_P4 : unpred_load_predicate<nxv4i1, LDR_PXI>;
 defm Pat_Load_P2 : unpred_load_predicate<nxv2i1, LDR_PXI>;
 
-multiclass ld1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
+multiclass ld1<Instruction RegRegInst, Instruction RegImmInst, ValueType Ty,
+               SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {
+  // reg + reg
+  let AddedComplexity = 1 in {
+    def : Pat<(Ty (Load (PredTy PPR:$gp), (AddrCP GPR64:$base, GPR64:$offset), MemVT)),
+              (RegRegInst PPR:$gp, GPR64sp:$base, GPR64:$offset)>;
+  }
+
+  // scalar + immediate (mul vl)
+  let AddedComplexity = 2 in {
+    def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
+              (RegImmInst PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
+  }
+
+  // base
+  def : Pat<(Ty (Load (PredTy PPR:$gp), GPR64:$base, MemVT)),
+            (RegImmInst PPR:$gp, GPR64sp:$base, (i64 0))>;
+}
+
+// 2-element contiguous loads
+defm : ld1<LD1B_D,  LD1B_D_IMM,  nxv2i64, AArch64ld1,  nxv2i1, nxv2i8,  am_sve_regreg_lsl0>;
+defm : ld1<LD1SB_D, LD1SB_D_IMM, nxv2i64, AArch64ld1s, nxv2i1, nxv2i8,  am_sve_regreg_lsl0>;
+defm : ld1<LD1H_D,  LD1H_D_IMM,  nxv2i64, AArch64ld1,  nxv2i1, nxv2i16, am_sve_regreg_lsl1>;
+defm : ld1<LD1SH_D, LD1SH_D_IMM, nxv2i64, AArch64ld1s, nxv2i1, nxv2i16, am_sve_regreg_lsl1>;
+defm : ld1<LD1W_D,  LD1W_D_IMM,  nxv2i64, AArch64ld1,  nxv2i1, nxv2i32, am_sve_regreg_lsl2>;
+defm : ld1<LD1SW_D, LD1SW_D_IMM, nxv2i64, AArch64ld1s, nxv2i1, nxv2i32, am_sve_regreg_lsl2>;
+defm : ld1<LD1D,    LD1D_IMM,    nxv2i64, AArch64ld1,  nxv2i1, nxv2i64, am_sve_regreg_lsl3>;
+defm : ld1<LD1D,    LD1D_IMM,    nxv2f64, AArch64ld1,  nxv2i1, nxv2f64, am_sve_regreg_lsl3>;
+
+// 4-element contiguous loads
+defm : ld1<LD1B_S,  LD1B_S_IMM,  nxv4i32, AArch64ld1,  nxv4i1, nxv4i8,  am_sve_regreg_lsl0>;
+defm : ld1<LD1SB_S, LD1SB_S_IMM, nxv4i32, AArch64ld1s, nxv4i1, nxv4i8,  am_sve_regreg_lsl0>;
+defm : ld1<LD1H_S,  LD1H_S_IMM,  nxv4i32, AArch64ld1,  nxv4i1, nxv4i16, am_sve_regreg_lsl1>;
+defm : ld1<LD1SH_S, LD1SH_S_IMM, nxv4i32, AArch64ld1s, nxv4i1, nxv4i16, am_sve_regreg_lsl1>;
+defm : ld1<LD1W,    LD1W_IMM,    nxv4i32, AArch64ld1,  nxv4i1, nxv4i32, am_sve_regreg_lsl2>;
+defm : ld1<LD1W,    LD1W_IMM,    nxv4f32, AArch64ld1,  nxv4i1, nxv4f32, am_sve_regreg_lsl2>;
+
+// 8-element contiguous loads
+defm : ld1<LD1B_H,  LD1B_H_IMM,  nxv8i16, AArch64ld1,  nxv8i1, nxv8i8,  am_sve_regreg_lsl0>;
+defm : ld1<LD1SB_H, LD1SB_H_IMM, nxv8i16, AArch64ld1s, nxv8i1, nxv8i8,  am_sve_regreg_lsl0>;
+defm : ld1<LD1H,    LD1H_IMM,    nxv8i16, AArch64ld1,  nxv8i1, nxv8i16, am_sve_regreg_lsl1>;
+defm : ld1<LD1H,    LD1H_IMM,    nxv8f16, AArch64ld1,  nxv8i1, nxv8f16, am_sve_regreg_lsl1>;
+
+// 16-element contiguous loads
+defm : ld1<LD1B, LD1B_IMM, nxv16i8, AArch64ld1, nxv16i1, nxv16i8, am_sve_regreg_lsl0>;
+
+multiclass ldnf1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT> {
   // scalar + immediate (mul vl)
   let AddedComplexity = 1 in {
     def : Pat<(Ty (Load (PredTy PPR:$gp), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), MemVT)),
@@ -1580,60 +1626,32 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
             (I PPR:$gp, GPR64sp:$base, (i64 0))>;
 }
 
-// 2-element contiguous loads
-defm : ld1<LD1B_D_IMM,  nxv2i64, AArch64ld1,  nxv2i1, nxv2i8>;
-defm : ld1<LD1SB_D_IMM, nxv2i64, AArch64ld1s, nxv2i1, nxv2i8>;
-defm : ld1<LD1H_D_IMM,  nxv2i64, AArch64ld1,  nxv2i1, nxv2i16>;
-defm : ld1<LD1SH_D_IMM, nxv2i64, AArch64ld1s, nxv2i1, nxv2i16>;
-defm : ld1<LD1W_D_IMM,  nxv2i64, AArch64ld1,  nxv2i1, nxv2i32>;
-defm : ld1<LD1SW_D_IMM, nxv2i64, AArch64ld1s, nxv2i1, nxv2i32>;
-defm : ld1<LD1D_IMM,    nxv2i64, AArch64ld1,  nxv2i1, nxv2i64>;
-defm : ld1<LD1D_IMM,    nxv2f64, AArch64ld1,  nxv2i1, nxv2f64>;
-
-// 4-element contiguous loads
-defm : ld1<LD1B_S_IMM,  nxv4i32, AArch64ld1,  nxv4i1, nxv4i8>;
-defm : ld1<LD1SB_S_IMM, nxv4i32, AArch64ld1s, nxv4i1, nxv4i8>;
-defm : ld1<LD1H_S_IMM,  nxv4i32, AArch64ld1,  nxv4i1, nxv4i16>;
-defm : ld1<LD1SH_S_IMM, nxv4i32, AArch64ld1s, nxv4i1, nxv4i16>;
-defm : ld1<LD1W_IMM,    nxv4i32, AArch64ld1,  nxv4i1, nxv4i32>;
-defm : ld1<LD1W_IMM,    nxv4f32, AArch64ld1,  nxv4i1, nxv4f32>;
-
-// 8-element contiguous loads
-defm : ld1<LD1B_H_IMM,  nxv8i16, AArch64ld1,  nxv8i1, nxv8i8>;
-defm : ld1<LD1SB_H_IMM, nxv8i16, AArch64ld1s, nxv8i1, nxv8i8>;
-defm : ld1<LD1H_IMM,    nxv8i16, AArch64ld1,  nxv8i1, nxv8i16>;
-defm : ld1<LD1H_IMM,    nxv8f16, AArch64ld1,  nxv8i1, nxv8f16>;
-
-// 16-element contiguous loads
-defm : ld1<LD1B_IMM, nxv16i8, AArch64ld1, nxv16i1, nxv16i8>;
-
-
 // 2-element contiguous non-faulting loads
-defm : ld1<LDNF1B_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i8>;
-defm : ld1<LDNF1SB_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i8>;
-defm : ld1<LDNF1H_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i16>;
-defm : ld1<LDNF1SH_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i16>;
-defm : ld1<LDNF1W_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i32>;
-defm : ld1<LDNF1SW_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i32>;
-defm : ld1<LDNF1D_IMM,    nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i64>;
-defm : ld1<LDNF1D_IMM,    nxv2f64, AArch64ldnf1,  nxv2i1, nxv2f64>;
+defm : ldnf1<LDNF1B_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i8>;
+defm : ldnf1<LDNF1SB_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i8>;
+defm : ldnf1<LDNF1H_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i16>;
+defm : ldnf1<LDNF1SH_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i16>;
+defm : ldnf1<LDNF1W_D_IMM,  nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i32>;
+defm : ldnf1<LDNF1SW_D_IMM, nxv2i64, AArch64ldnf1s, nxv2i1, nxv2i32>;
+defm : ldnf1<LDNF1D_IMM,    nxv2i64, AArch64ldnf1,  nxv2i1, nxv2i64>;
+defm : ldnf1<LDNF1D_IMM,    nxv2f64, AArch64ldnf1,  nxv2i1, nxv2f64>;
 
 // 4-element contiguous non-faulting loads
-defm : ld1<LDNF1B_S_IMM,  nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i8>;
-defm : ld1<LDNF1SB_S_IMM, nxv4i32, AArch64ldnf1s, nxv4i1, nxv4i8>;
-defm : ld1<LDNF1H_S_IMM,  nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i16>;
-defm : ld1<LDNF1SH_S_IMM, nxv4i32, AArch64ldnf1s, nxv4i1, nxv4i16>;
-defm : ld1<LDNF1W_IMM,    nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i32>;
-defm : ld1<LDNF1W_IMM,    nxv4f32, AArch64ldnf1,  nxv4i1, nxv4f32>;
+defm : ldnf1<LDNF1B_S_IMM,  nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i8>;
+defm : ldnf1<LDNF1SB_S_IMM, nxv4i32, AArch64ldnf1s, nxv4i1, nxv4i8>;
+defm : ldnf1<LDNF1H_S_IMM,  nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i16>;
+defm : ldnf1<LDNF1SH_S_IMM, nxv4i32, AArch64ldnf1s, nxv4i1, nxv4i16>;
+defm : ldnf1<LDNF1W_IMM,    nxv4i32, AArch64ldnf1,  nxv4i1, nxv4i32>;
+defm : ldnf1<LDNF1W_IMM,    nxv4f32, AArch64ldnf1,  nxv4i1, nxv4f32>;
 
 // 8-element contiguous non-faulting loads
-defm : ld1<LDNF1B_H_IMM,  nxv8i16, AArch64ldnf1,  nxv8i1, nxv8i8>;
-defm : ld1<LDNF1SB_H_IMM, nxv8i16, AArch64ldnf1s, nxv8i1, nxv8i8>;
-defm : ld1<LDNF1H_IMM,    nxv8i16, AArch64ldnf1,  nxv8i1, nxv8i16>;
-defm : ld1<LDNF1H_IMM,    nxv8f16, AArch64ldnf1,  nxv8i1, nxv8f16>;
+defm : ldnf1<LDNF1B_H_IMM,  nxv8i16, AArch64ldnf1,  nxv8i1, nxv8i8>;
+defm : ldnf1<LDNF1SB_H_IMM, nxv8i16, AArch64ldnf1s, nxv8i1, nxv8i8>;
+defm : ldnf1<LDNF1H_IMM,    nxv8i16, AArch64ldnf1,  nxv8i1, nxv8i16>;
+defm : ldnf1<LDNF1H_IMM,    nxv8f16, AArch64ldnf1,  nxv8i1, nxv8f16>;
 
 // 16-element contiguous non-faulting loads
-defm : ld1<LDNF1B_IMM, nxv16i8, AArch64ldnf1, nxv16i1, nxv16i8>;
+defm : ldnf1<LDNF1B_IMM, nxv16i8, AArch64ldnf1, nxv16i1, nxv16i8>;
 
 multiclass ldff1<Instruction I, ValueType Ty, SDPatternOperator Load, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {
   // reg + reg
@@ -1675,35 +1693,42 @@ multiclass sve_prefetch<SDPatternOperator prefetch, ValueType PredTy, Instructio
 // 16-element contiguous first faulting loads
 defm : ldff1<LDFF1B, nxv16i8, AArch64ldff1, nxv16i1, nxv16i8, am_sve_regreg_lsl0>;
 
-multiclass st1<Instruction I, ValueType Ty, SDPatternOperator Store, ValueType PredTy, ValueType MemVT> {
-  // scalar + immediate (mul vl)
+multiclass st1<Instruction RegRegInst, Instruction RegImmInst, ValueType Ty,
+               SDPatternOperator Store, ValueType PredTy, ValueType MemVT, ComplexPattern AddrCP> {
+  // reg + reg
   let AddedComplexity = 1 in {
+    def : Pat<(Store (Ty ZPR:$vec), (AddrCP GPR64:$base, GPR64:$offset), (PredTy PPR:$gp), MemVT),
+              (RegRegInst ZPR:$vec, PPR:$gp, GPR64sp:$base, GPR64:$offset)>;
+  }
+
+  // scalar + immediate (mul vl)
+  let AddedComplexity = 2 in {
     def : Pat<(Store (Ty ZPR:$vec), (am_sve_indexed_s4 GPR64sp:$base, simm4s1:$offset), (PredTy PPR:$gp), MemVT),
-              (I ZPR:$vec, PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
+              (RegImmInst ZPR:$vec, PPR:$gp, GPR64sp:$base, simm4s1:$offset)>;
   }
 
   // base
   def : Pat<(Store (Ty ZPR:$vec), GPR64:$base, (PredTy PPR:$gp), MemVT),
-            (I ZPR:$vec, PPR:$gp, GPR64:$base, (i64 0))>;
+            (RegImmInst ZPR:$vec, PPR:$gp, GPR64:$base, (i64 0))>;
 }
 
 // 2-element contiguous store
-defm : st1<ST1B_D_IMM, nxv2i64, AArch64st1, nxv2i1, nxv2i8>;
-defm : st1<ST1H_D_IMM, nxv2i64, AArch64st1, nxv2i1, nxv2i16>;
-defm : st1<ST1W_D_IMM, nxv2i64, AArch64st1, nxv2i1, nxv2i32>;
-defm : st1<ST1D_IMM,   nxv2i64, AArch64st1, nxv2i1, nxv2i64>;
+defm : st1<ST1B_D, ST1B_D_IMM, nxv2i64, AArch64st1, nxv2i1, nxv2i8,  am_sve_regreg_lsl0>;
+defm : st1<ST1H_D, ST1H_D_IMM, nxv2i64, AArch64st1, nxv2i1, nxv2i16, am_sve_regreg_lsl1>;
+defm : st1<ST1W_D, ST1W_D_IMM, nxv2i64, AArch64st1, nxv2i1, nxv2i32, am_sve_regreg_lsl2>;
+defm : st1<ST1D,   ST1D_IMM,   nxv2i64, AArch64st1, nxv2i1, nxv2i64, am_sve_regreg_lsl3>;
 
 // 4-element contiguous store
-defm : st1<ST1B_S_IMM, nxv4i32, AArch64st1, nxv4i1, nxv4i8>;
-defm : st1<ST1H_S_IMM, nxv4i32, AArch64st1, nxv4i1, nxv4i16>;
-defm : st1<ST1W_IMM,   nxv4i32, AArch64st1, nxv4i1, nxv4i32>;
+defm : st1<ST1B_S, ST1B_S_IMM, nxv4i32, AArch64st1, nxv4i1, nxv4i8,  am_sve_regreg_lsl0>;
+defm : st1<ST1H_S, ST1H_S_IMM, nxv4i32, AArch64st1, nxv4i1, nxv4i16, am_sve_regreg_lsl1>;
+defm : st1<ST1W,   ST1W_IMM,   nxv4i32, AArch64st1, nxv4i1, nxv4i32, am_sve_regreg_lsl2>;
 
 // 8-element contiguous store
-defm : st1<ST1B_H_IMM, nxv8i16, AArch64st1, nxv8i1, nxv8i8>;
-defm : st1<ST1H_IMM,   nxv8i16, AArch64st1, nxv8i1, nxv8i16>;
+defm : st1<ST1B_H, ST1B_H_IMM, nxv8i16, AArch64st1, nxv8i1, nxv8i8,  am_sve_regreg_lsl0>;
+defm : st1<ST1H,   ST1H_IMM,   nxv8i16, AArch64st1, nxv8i1, nxv8i16, am_sve_regreg_lsl1>;
 
 // 16-element contiguous store
-defm : st1<ST1B_IMM, nxv16i8, AArch64st1, nxv16i1, nxv16i8>;
+defm : st1<ST1B, ST1B_IMM, nxv16i8, AArch64st1, nxv16i1, nxv16i8, am_sve_regreg_lsl0>;
 
 }
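The store side is symmetrical. A minimal sketch of what the new st1 reg + reg pattern should select (the function name is illustrative; the intrinsic signature matches the declarations used in the tests below):

  define void @example_st1b(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a, i64 %index) {
    ; Expected selection with the new pattern: st1b { z0.b }, p0, [x0, x1]
    %base = getelementptr i8, i8* %a, i64 %index
    call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base)
    ret void
  }
  declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)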
@@ -0,0 +1,301 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_upper_bound:
+; CHECK: ld1b { z0.b }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_inbound:
+; CHECK: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 4 x i32> @ld1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_s_inbound:
+; CHECK: ld1b { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ld1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1sb_s_inbound:
+; CHECK: ld1sb { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
+  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_lower_bound:
+; CHECK: ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_out_of_upper_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_out_of_lower_bound:
+; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
+; CHECK-NEXT: ld1b { z0.b }, p0/z, [x0, x[[OFFSET]]]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
+  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
+  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
+  ret <vscale x 16 x i8> %load
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i16> @ld1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_h_inbound:
+; CHECK: ld1b { z0.h }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @ld1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1sb_h_inbound:
+; CHECK: ld1sb { z0.h }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
+  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
+  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ld1h_inbound:
+; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
+  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 1
+  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base_scalar)
+  ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 4 x i32> @ld1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ld1h_s_inbound:
+; CHECK: ld1h { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
+  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ld1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ld1sh_s_inbound:
+; CHECK: ld1sh { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
+  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ld1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1b_d_inbound:
+; CHECK: ld1b { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
+  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
+; CHECK-LABEL: ld1sb_d_inbound:
+; CHECK: ld1sb { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
+  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ld1h_d_inbound:
+; CHECK: ld1h { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
+  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
+; CHECK-LABEL: ld1sh_d_inbound:
+; CHECK: ld1sh { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
+  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 8 x half> @ld1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a) {
+; CHECK-LABEL: ld1h_f16_inbound:
+; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
+  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 1
+  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, half* %base_scalar)
+  ret <vscale x 8 x half> %load
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i32> @ld1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ld1w_inbound:
+; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
+  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base_scalar)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 4 x float> @ld1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a) {
+; CHECK-LABEL: ld1w_f32_inbound:
+; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
+  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, float* %base_scalar)
+  ret <vscale x 4 x float> %load
+}
+
+;
+; LD1D
+;
+
+define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {
+; CHECK-LABEL: ld1d_inbound:
+; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
+  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 1
+  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base_scalar)
+  ret <vscale x 2 x i64> %load
+}
+
+define <vscale x 2 x i64> @ld1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ld1w_d_inbound:
+; CHECK: ld1w { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
+  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
+; CHECK-LABEL: ld1sw_d_inbound:
+; CHECK: ld1sw { z0.d }, p0/z, [x0, #7, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
+  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
+  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x double> @ld1d_f64_inbound(<vscale x 2 x i1> %pg, double* %a) {
+; CHECK-LABEL: ld1d_f64_inbound:
+; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl]
+; CHECK-NEXT: ret
+  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
+  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 1
+  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, double* %base_scalar)
+  ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
+
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, i8*)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
+
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, i16*)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
@@ -0,0 +1,217 @@
+; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
+
+;
+; LD1B
+;
+
+define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pg, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1b_i8:
+; CHECK: ld1b { z0.b }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base)
+  ret <vscale x 16 x i8> %load
+}
+
+define <vscale x 8 x i16> @ld1b_h(<vscale x 8 x i1> %pred, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1b_h:
+; CHECK: ld1b { z0.h }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
+  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 8 x i16> @ld1sb_h(<vscale x 8 x i1> %pred, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1sb_h:
+; CHECK: ld1sb { z0.h }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pred, i8* %base)
+  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %res
+}
+
+define <vscale x 4 x i32> @ld1b_s(<vscale x 4 x i1> %pred, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1b_s:
+; CHECK: ld1b { z0.s }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, i8* %base)
+  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ld1sb_s(<vscale x 4 x i1> %pred, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1sb_s:
+; CHECK: ld1sb { z0.s }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pred, i8* %base)
+  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ld1b_d(<vscale x 2 x i1> %pred, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1b_d:
+; CHECK: ld1b { z0.d }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %base)
+  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, i8* %a, i64 %index) {
+; CHECK-LABEL: ld1sb_d:
+; CHECK: ld1sb { z0.d }, p0/z, [x0, x1]
+; CHECK-NEXT: ret
+  %base = getelementptr i8, i8* %a, i64 %index
+  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pred, i8* %base)
+  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LD1H
+;
+
+define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pg, i16* %a, i64 %index) {
+; CHECK-LABEL: ld1h_i16:
+; CHECK: ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr i16, i16* %a, i64 %index
+  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base)
+  ret <vscale x 8 x i16> %load
+}
+
+define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pg, half* %a, i64 %index) {
+; CHECK-LABEL: ld1h_f16:
+; CHECK: ld1h { z0.h }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr half, half* %a, i64 %index
+  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, half* %base)
+  ret <vscale x 8 x half> %load
+}
+
+define <vscale x 4 x i32> @ld1h_s(<vscale x 4 x i1> %pred, i16* %a, i64 %index) {
+; CHECK-LABEL: ld1h_s:
+; CHECK: ld1h { z0.s }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr i16, i16* %a, i64 %index
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, i16* %base)
+  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 4 x i32> @ld1sh_s(<vscale x 4 x i1> %pred, i16* %a, i64 %index) {
+; CHECK-LABEL: ld1sh_s:
+; CHECK: ld1sh { z0.s }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr i16, i16* %a, i64 %index
+  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pred, i16* %base)
+  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %res
+}
+
+define <vscale x 2 x i64> @ld1h_d(<vscale x 2 x i1> %pred, i16* %a, i64 %index) {
+; CHECK-LABEL: ld1h_d:
+; CHECK: ld1h { z0.d }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr i16, i16* %a, i64 %index
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, i16* %base)
+  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, i16* %a, i64 %index) {
+; CHECK-LABEL: ld1sh_d:
+; CHECK: ld1sh { z0.d }, p0/z, [x0, x1, lsl #1]
+; CHECK-NEXT: ret
+  %base = getelementptr i16, i16* %a, i64 %index
+  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pred, i16* %base)
+  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LD1W
+;
+
+define <vscale x 4 x i32> @ld1w(<vscale x 4 x i1> %pg, i32* %a, i64 %index) {
+; CHECK-LABEL: ld1w:
+; CHECK: ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr i32, i32* %a, i64 %index
+  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base)
+  ret <vscale x 4 x i32> %load
+}
+
+define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pg, float* %a, i64 %index) {
+; CHECK-LABEL: ld1w_f32:
+; CHECK: ld1w { z0.s }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr float, float* %a, i64 %index
+  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, float* %base)
+  ret <vscale x 4 x float> %load
+}
+
+define <vscale x 2 x i64> @ld1w_d(<vscale x 2 x i1> %pred, i32* %a, i64 %index) {
+; CHECK-LABEL: ld1w_d:
+; CHECK: ld1w { z0.d }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr i32, i32* %a, i64 %index
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, i32* %base)
+  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, i32* %a, i64 %index) {
+; CHECK-LABEL: ld1sw_d:
+; CHECK: ld1sw { z0.d }, p0/z, [x0, x1, lsl #2]
+; CHECK-NEXT: ret
+  %base = getelementptr i32, i32* %a, i64 %index
+  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pred, i32* %base)
+  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %res
+}
+
+;
+; LD1D
+;
+
+define <vscale x 2 x i64> @ld1d(<vscale x 2 x i1> %pg, i64* %a, i64 %index) {
+; CHECK-LABEL: ld1d:
+; CHECK: ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %base = getelementptr i64, i64* %a, i64 %index
+  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base)
+  ret <vscale x 2 x i64> %load
+}
+
+define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pg, double* %a, i64 %index) {
+; CHECK-LABEL: ld1d_f64:
+; CHECK: ld1d { z0.d }, p0/z, [x0, x1, lsl #3]
+; CHECK-NEXT: ret
+  %base = getelementptr double, double* %a, i64 %index
+  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, double* %base)
+  ret <vscale x 2 x double> %load
+}
+
+declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
+
+declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
+declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
+declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
+
+declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1>, i8*)
+declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1>, i16*)
+declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
+declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
+
+declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1>, i8*)
+declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1>, i16*)
+declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1>, i32*)
+declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
+declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)
@@ -66,89 +66,6 @@ define <vscale x 2 x i64> @ld1sb_d(<vscale x 2 x i1> %pred, i8* %addr) {
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 16 x i8> @ld1b_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_upper_bound:
-; CHECK: ld1b { z0.b }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret <vscale x 16 x i8> %load
-}
-
-define <vscale x 16 x i8> @ld1b_inbound(<vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_inbound:
-; CHECK: ld1b { z0.b }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret <vscale x 16 x i8> %load
-}
-
-define <vscale x 4 x i32> @ld1b_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_s_inbound:
-; CHECK: ld1b { z0.s }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
-  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 4 x i32> @ld1sb_s_inbound(<vscale x 4 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1sb_s_inbound:
-; CHECK: ld1sb { z0.s }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
-  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
-  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.nxv4i8(<vscale x 4 x i1> %pg, i8* %base_scalar)
-  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 16 x i8> @ld1b_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_lower_bound:
-; CHECK: ld1b { z0.b }, p0/z, [x0, #-8, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret <vscale x 16 x i8> %load
-}
-
-define <vscale x 16 x i8> @ld1b_out_of_upper_bound(<vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_out_of_upper_bound:
-; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
-; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x[[BASE]]]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret <vscale x 16 x i8> %load
-}
-
-define <vscale x 16 x i8> @ld1b_out_of_lower_bound(<vscale x 16 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_out_of_lower_bound:
-; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
-; CHECK-NEXT: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
-; CHECK-NEXT: ld1b { z0.b }, p0/z, [x[[BASE]]]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
-  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
-  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
-  %load = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pg, i8* %base_scalar)
-  ret <vscale x 16 x i8> %load
-}
-
 ;
 ; LD1H
 ;
@@ -205,124 +122,6 @@ define <vscale x 2 x i64> @ld1sh_d(<vscale x 2 x i1> %pred, i16* %addr) {
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 8 x i16> @ld1b_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_h_inbound:
-; CHECK: ld1b { z0.h }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
-  %res = zext <vscale x 8 x i8> %load to <vscale x 8 x i16>
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 8 x i16> @ld1sb_h_inbound(<vscale x 8 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1sb_h_inbound:
-; CHECK: ld1sb { z0.h }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
-  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
-  %load = call <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1> %pg, i8* %base_scalar)
-  %res = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
-  ret <vscale x 8 x i16> %res
-}
-
-define <vscale x 8 x i16> @ld1h_inbound(<vscale x 8 x i1> %pg, i16* %a) {
-; CHECK-LABEL: ld1h_inbound:
-; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
-  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
-  %load = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pg, i16* %base_scalar)
-  ret <vscale x 8 x i16> %load
-}
-
-define <vscale x 4 x i32> @ld1h_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
-; CHECK-LABEL: ld1h_s_inbound:
-; CHECK: ld1h { z0.s }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
-  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 4 x i32> @ld1sh_s_inbound(<vscale x 4 x i1> %pg, i16* %a) {
-; CHECK-LABEL: ld1sh_s_inbound:
-; CHECK: ld1sh { z0.s }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
-  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
-  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.nxv4i16(<vscale x 4 x i1> %pg, i16* %base_scalar)
-  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
-  ret <vscale x 4 x i32> %res
-}
-
-define <vscale x 2 x i64> @ld1b_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1b_d_inbound:
-; CHECK: ld1b { z0.d }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
-  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 2 x i64> @ld1sb_d_inbound(<vscale x 2 x i1> %pg, i8* %a) {
-; CHECK-LABEL: ld1sb_d_inbound:
-; CHECK: ld1sb { z0.d }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
-  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
-  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.nxv2i8(<vscale x 2 x i1> %pg, i8* %base_scalar)
-  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 2 x i64> @ld1h_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
-; CHECK-LABEL: ld1h_d_inbound:
-; CHECK: ld1h { z0.d }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
-  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 2 x i64> @ld1sh_d_inbound(<vscale x 2 x i1> %pg, i16* %a) {
-; CHECK-LABEL: ld1sh_d_inbound:
-; CHECK: ld1sh { z0.d }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
-  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
-  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.nxv2i16(<vscale x 2 x i1> %pg, i16* %base_scalar)
-  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 8 x half> @ld1h_f16_inbound(<vscale x 8 x i1> %pg, half* %a) {
-; CHECK-LABEL: ld1h_f16_inbound:
-; CHECK: ld1h { z0.h }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
-  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
-  %load = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pg, half* %base_scalar)
-  ret <vscale x 8 x half> %load
-}
-
 ;
 ; LD1W
 ;
@@ -361,28 +160,6 @@ define <vscale x 2 x i64> @ld1sw_d(<vscale x 2 x i1> %pred, i32* %addr) {
   ret <vscale x 2 x i64> %res
 }
 
-define <vscale x 4 x i32> @ld1w_inbound(<vscale x 4 x i1> %pg, i32* %a) {
-; CHECK-LABEL: ld1w_inbound:
-; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
-  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
-  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pg, i32* %base_scalar)
-  ret <vscale x 4 x i32> %load
-}
-
-define <vscale x 4 x float> @ld1w_f32_inbound(<vscale x 4 x i1> %pg, float* %a) {
-; CHECK-LABEL: ld1w_f32_inbound:
-; CHECK: ld1w { z0.s }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
-  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
-  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pg, float* %base_scalar)
-  ret <vscale x 4 x float> %load
-}
-
 ;
 ; LD1D
 ;
@@ -405,52 +182,6 @@ define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
   ret <vscale x 2 x double> %res
 }
 
-define <vscale x 2 x i64> @ld1d_inbound(<vscale x 2 x i1> %pg, i64* %a) {
-; CHECK-LABEL: ld1d_inbound:
-; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
-  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
-  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pg, i64* %base_scalar)
-  ret <vscale x 2 x i64> %load
-}
-
-define <vscale x 2 x i64> @ld1w_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
-; CHECK-LABEL: ld1w_d_inbound:
-; CHECK: ld1w { z0.d }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
-  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 2 x i64> @ld1sw_d_inbound(<vscale x 2 x i1> %pg, i32* %a) {
-; CHECK-LABEL: ld1sw_d_inbound:
-; CHECK: ld1sw { z0.d }, p0/z, [x0, #7, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
-  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 7
-  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
-  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.nxv2i32(<vscale x 2 x i1> %pg, i32* %base_scalar)
-  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
-  ret <vscale x 2 x i64> %res
-}
-
-define <vscale x 2 x double> @ld1d_f64_inbound(<vscale x 2 x i1> %pg, double* %a) {
-; CHECK-LABEL: ld1d_f64_inbound:
-; CHECK: ld1d { z0.d }, p0/z, [x0, #1, mul vl]
-; CHECK-NEXT: ret
-  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
-  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 1
-  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
-  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pg, double* %base_scalar)
-  ret <vscale x 2 x double> %load
-}
-
 declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
 
 declare <vscale x 8 x i8> @llvm.aarch64.sve.ld1.nxv8i8(<vscale x 8 x i1>, i8*)
|
@ -0,0 +1,229 @@
|
|||
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s
|
||||
|
||||
;
|
||||
; ST1B
|
||||
;
|
||||
|
||||
define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
|
||||
; CHECK-LABEL: st1b_upper_bound:
|
||||
; CHECK: st1b { z0.b }, p0, [x0, #7, mul vl]
|
||||
; CHECK-NEXT: ret
|
||||
%base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
|
||||
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
|
||||
%base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
|
||||
call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
|
||||
; CHECK-LABEL: st1b_inbound:
|
||||
; CHECK: st1b { z0.b }, p0, [x0, #1, mul vl]
|
||||
; CHECK-NEXT: ret
|
||||
%base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
|
||||
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
|
||||
%base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
|
||||
call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
|
||||
; CHECK-LABEL: st1b_lower_bound:
|
||||
; CHECK: st1b { z0.b }, p0, [x0, #-8, mul vl]
|
||||
; CHECK-NEXT: ret
|
||||
%base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
|
||||
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
|
||||
%base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
|
||||
call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
|
||||
; CHECK-LABEL: st1b_out_of_upper_bound:
|
||||
; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
|
||||
; CHECK: st1b { z0.b }, p0, [x0, x[[OFFSET]]]
|
||||
; CHECK-NEXT: ret
|
||||
%base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
|
||||
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
|
||||
%base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
|
||||
call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
|
||||
; CHECK-LABEL: st1b_out_of_lower_bound:
|
||||
; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
|
||||
; CHECK: st1b { z0.b }, p0, [x0, x[[OFFSET]]]
|
||||
; CHECK-NEXT: ret
|
||||
%base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
|
||||
%base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
|
||||
%base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
|
||||
call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
|
||||
ret void
|
||||
}
|
||||
|
||||
define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_s_inbound:
; CHECK: st1b { z0.s }, p0, [x0, #7, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_h_inbound:
; CHECK: st1b { z0.h }, p0, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 1
  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
  %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_d_inbound:
; CHECK: st1b { z0.d }, p0, [x0, #-7, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 -7
  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, i8* %base_scalar)
  ret void
}

;
; ST1H
;

define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %a) {
; CHECK-LABEL: st1h_inbound:
; CHECK: st1h { z0.h }, p0, [x0, #-1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 -1
  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %base_scalar)
  ret void
}

define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %a) {
; CHECK-LABEL: st1h_f16_inbound:
; CHECK: st1h { z0.h }, p0, [x0, #-5, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 -5
  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %base_scalar)
  ret void
}

define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %a) {
; CHECK-LABEL: st1h_s_inbound:
; CHECK: st1h { z0.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 2
  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, i16* %base_scalar)
  ret void
}

define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %a) {
; CHECK-LABEL: st1h_d_inbound:
; CHECK: st1h { z0.d }, p0, [x0, #-4, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 -4
  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, i16* %base_scalar)
  ret void
}

;
; ST1W
;

define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %a) {
; CHECK-LABEL: st1w_inbound:
; CHECK: st1w { z0.s }, p0, [x0, #6, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 6
  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base_scalar)
  ret void
}

define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %a) {
; CHECK-LABEL: st1w_f32_inbound:
; CHECK: st1w { z0.s }, p0, [x0, #-1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 -1
  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base_scalar)
  ret void
}

define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %a) {
; CHECK-LABEL: st1w_d_inbound:
; CHECK: st1w { z0.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 1
  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, i32* %base_scalar)
  ret void
}

;
; ST1D
;

define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %a) {
; CHECK-LABEL: st1d_inbound:
; CHECK: st1d { z0.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 5
  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base_scalar)
  ret void
}

define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %a) {
; CHECK-LABEL: st1d_f64_inbound:
; CHECK: st1d { z0.d }, p0, [x0, #-8, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 -8
  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base_scalar)
  ret void
}

declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)

declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)

declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)

declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)

@@ -0,0 +1,184 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; ST1B
;

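; These tests exercise the register + register addressing mode: the index is
; scaled by the element size in the addressing operand (lsl #1, #2 or #3 for
; 16-, 32- and 64-bit elements; byte accesses take no shift).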
define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %a, i64 %index) {
; CHECK-LABEL: st1b_i8:
; CHECK: st1b { z0.b }, p0, [x0, x1]
; CHECK-NEXT: ret
  %base = getelementptr i8, i8* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data,
                                          <vscale x 16 x i1> %pred,
                                          i8* %base)
  ret void
}

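; st1b with .h, .s or .d elements is a truncating store of the low byte of
; each element, so the data vector is truncated before each call.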
define void @st1b_h(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i8* %a, i64 %index) {
; CHECK-LABEL: st1b_h:
; CHECK: st1b { z0.h }, p0, [x0, x1]
; CHECK-NEXT: ret
  %base = getelementptr i8, i8* %a, i64 %index
  %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc,
                                         <vscale x 8 x i1> %pred,
                                         i8* %base)
  ret void
}

define void @st1b_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i8* %a, i64 %index) {
; CHECK-LABEL: st1b_s:
; CHECK: st1b { z0.s }, p0, [x0, x1]
; CHECK-NEXT: ret
  %base = getelementptr i8, i8* %a, i64 %index
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc,
                                         <vscale x 4 x i1> %pred,
                                         i8* %base)
  ret void
}

define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %a, i64 %index) {
; CHECK-LABEL: st1b_d:
; CHECK: st1b { z0.d }, p0, [x0, x1]
; CHECK-NEXT: ret
  %base = getelementptr i8, i8* %a, i64 %index
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc,
                                         <vscale x 2 x i1> %pred,
                                         i8* %base)
  ret void
}

;
; ST1H
;

define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %a, i64 %index) {
; CHECK-LABEL: st1h_i16:
; CHECK: st1h { z0.h }, p0, [x0, x1, lsl #1]
; CHECK-NEXT: ret
  %base = getelementptr i16, i16* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data,
                                          <vscale x 8 x i1> %pred,
                                          i16* %base)
  ret void
}

define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %a, i64 %index) {
; CHECK-LABEL: st1h_f16:
; CHECK: st1h { z0.h }, p0, [x0, x1, lsl #1]
; CHECK-NEXT: ret
  %base = getelementptr half, half* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data,
                                          <vscale x 8 x i1> %pred,
                                          half* %base)
  ret void
}

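; No index operand here, so the plain base-register form is selected.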
define void @st1h_s(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i16* %addr) {
; CHECK-LABEL: st1h_s:
; CHECK: st1h { z0.s }, p0, [x0]
; CHECK-NEXT: ret
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc,
                                          <vscale x 4 x i1> %pred,
                                          i16* %addr)
  ret void
}

define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %a, i64 %index) {
; CHECK-LABEL: st1h_d:
; CHECK: st1h { z0.d }, p0, [x0, x1, lsl #1]
; CHECK-NEXT: ret
  %base = getelementptr i16, i16* %a, i64 %index
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc,
                                          <vscale x 2 x i1> %pred,
                                          i16* %base)
  ret void
}

;
; ST1W
;

define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %a, i64 %index) {
; CHECK-LABEL: st1w_i32:
; CHECK: st1w { z0.s }, p0, [x0, x1, lsl #2]
; CHECK-NEXT: ret
  %base = getelementptr i32, i32* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
                                          <vscale x 4 x i1> %pred,
                                          i32* %base)
  ret void
}

define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %a, i64 %index) {
; CHECK-LABEL: st1w_f32:
; CHECK: st1w { z0.s }, p0, [x0, x1, lsl #2]
; CHECK-NEXT: ret
  %base = getelementptr float, float* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
                                          <vscale x 4 x i1> %pred,
                                          float* %base)
  ret void
}

define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %a, i64 %index) {
; CHECK-LABEL: st1w_d:
; CHECK: st1w { z0.d }, p0, [x0, x1, lsl #2]
; CHECK-NEXT: ret
  %base = getelementptr i32, i32* %a, i64 %index
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc,
                                          <vscale x 2 x i1> %pred,
                                          i32* %base)
  ret void
}

;
; ST1D
;

define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %a, i64 %index) {
; CHECK-LABEL: st1d_i64:
; CHECK: st1d { z0.d }, p0, [x0, x1, lsl #3]
; CHECK-NEXT: ret
  %base = getelementptr i64, i64* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
                                          <vscale x 2 x i1> %pred,
                                          i64* %base)
  ret void
}

define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %a, i64 %index) {
; CHECK-LABEL: st1d_f64:
; CHECK: st1d { z0.d }, p0, [x0, x1, lsl #3]
; CHECK-NEXT: ret
  %base = getelementptr double, double* %a, i64 %index
  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
                                          <vscale x 2 x i1> %pred,
                                          double* %base)
  ret void
}

declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)

declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)

declare void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)

declare void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)

@@ -47,101 +47,6 @@ define void @st1b_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i8* %addr
  ret void
}

define void @st1b_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_upper_bound:
; CHECK: st1b { z0.b }, p0, [x0, #7, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 7
  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_inbound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_inbound:
; CHECK: st1b { z0.b }, p0, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 1
  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_lower_bound:
; CHECK: st1b { z0.b }, p0, [x0, #-8, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -8
  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
  ret void
}

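; Out-of-range offsets previously required an explicit add to form the base
; address, with the store taking only the base register, as checked below.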
define void @st1b_out_of_upper_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_out_of_upper_bound:
; CHECK: rdvl x[[OFFSET:[0-9]+]], #8
; CHECK: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
; CHECK: st1b { z0.b }, p0, [x[[BASE]]]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 8
  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_out_of_lower_bound(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_out_of_lower_bound:
; CHECK: rdvl x[[OFFSET:[0-9]+]], #-9
; CHECK: add x[[BASE:[0-9]+]], x0, x[[OFFSET]]
; CHECK: st1b { z0.b }, p0, [x[[BASE]]]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 16 x i8>*
  %base = getelementptr <vscale x 16 x i8>, <vscale x 16 x i8>* %base_scalable, i64 -9
  %base_scalar = bitcast <vscale x 16 x i8>* %base to i8*
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_s_inbound:
; CHECK: st1b { z0.s }, p0, [x0, #7, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 4 x i8>*
  %base = getelementptr <vscale x 4 x i8>, <vscale x 4 x i8>* %base_scalable, i64 7
  %base_scalar = bitcast <vscale x 4 x i8>* %base to i8*
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i8>
  call void @llvm.aarch64.sve.st1.nxv4i8(<vscale x 4 x i8> %trunc, <vscale x 4 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_h_inbound:
; CHECK: st1b { z0.h }, p0, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 8 x i8>*
  %base = getelementptr <vscale x 8 x i8>, <vscale x 8 x i8>* %base_scalable, i64 1
  %base_scalar = bitcast <vscale x 8 x i8>* %base to i8*
  %trunc = trunc <vscale x 8 x i16> %data to <vscale x 8 x i8>
  call void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8> %trunc, <vscale x 8 x i1> %pg, i8* %base_scalar)
  ret void
}

define void @st1b_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i8* %a) {
; CHECK-LABEL: st1b_d_inbound:
; CHECK: st1b { z0.d }, p0, [x0, #-7, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i8* %a to <vscale x 2 x i8>*
  %base = getelementptr <vscale x 2 x i8>, <vscale x 2 x i8>* %base_scalable, i64 -7
  %base_scalar = bitcast <vscale x 2 x i8>* %base to i8*
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i8>
  call void @llvm.aarch64.sve.st1.nxv2i8(<vscale x 2 x i8> %trunc, <vscale x 2 x i1> %pg, i8* %base_scalar)
  ret void
}

;
; ST1H
;

@@ -188,52 +93,6 @@ define void @st1h_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i16* %add
  ret void
}

define void @st1h_inbound(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %a) {
; CHECK-LABEL: st1h_inbound:
; CHECK: st1h { z0.h }, p0, [x0, #-1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i16* %a to <vscale x 8 x i16>*
  %base = getelementptr <vscale x 8 x i16>, <vscale x 8 x i16>* %base_scalable, i64 -1
  %base_scalar = bitcast <vscale x 8 x i16>* %base to i16*
  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pg, i16* %base_scalar)
  ret void
}

define void @st1h_f16_inbound(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %a) {
; CHECK-LABEL: st1h_f16_inbound:
; CHECK: st1h { z0.h }, p0, [x0, #-5, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast half* %a to <vscale x 8 x half>*
  %base = getelementptr <vscale x 8 x half>, <vscale x 8 x half>* %base_scalable, i64 -5
  %base_scalar = bitcast <vscale x 8 x half>* %base to half*
  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pg, half* %base_scalar)
  ret void
}

define void @st1h_s_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i16* %a) {
; CHECK-LABEL: st1h_s_inbound:
; CHECK: st1h { z0.s }, p0, [x0, #2, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i16* %a to <vscale x 4 x i16>*
  %base = getelementptr <vscale x 4 x i16>, <vscale x 4 x i16>* %base_scalable, i64 2
  %base_scalar = bitcast <vscale x 4 x i16>* %base to i16*
  %trunc = trunc <vscale x 4 x i32> %data to <vscale x 4 x i16>
  call void @llvm.aarch64.sve.st1.nxv4i16(<vscale x 4 x i16> %trunc, <vscale x 4 x i1> %pg, i16* %base_scalar)
  ret void
}

define void @st1h_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i16* %a) {
; CHECK-LABEL: st1h_d_inbound:
; CHECK: st1h { z0.d }, p0, [x0, #-4, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i16* %a to <vscale x 2 x i16>*
  %base = getelementptr <vscale x 2 x i16>, <vscale x 2 x i16>* %base_scalable, i64 -4
  %base_scalar = bitcast <vscale x 2 x i16>* %base to i16*
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i16>
  call void @llvm.aarch64.sve.st1.nxv2i16(<vscale x 2 x i16> %trunc, <vscale x 2 x i1> %pg, i16* %base_scalar)
  ret void
}

;
; ST1W
;

@@ -269,40 +128,6 @@ define void @st1w_d(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i32* %add
  ret void
}

define void @st1w_inbound(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %a) {
; CHECK-LABEL: st1w_inbound:
; CHECK: st1w { z0.s }, p0, [x0, #6, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i32* %a to <vscale x 4 x i32>*
  %base = getelementptr <vscale x 4 x i32>, <vscale x 4 x i32>* %base_scalable, i64 6
  %base_scalar = bitcast <vscale x 4 x i32>* %base to i32*
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pg, i32* %base_scalar)
  ret void
}

define void @st1w_f32_inbound(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %a) {
; CHECK-LABEL: st1w_f32_inbound:
; CHECK: st1w { z0.s }, p0, [x0, #-1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast float* %a to <vscale x 4 x float>*
  %base = getelementptr <vscale x 4 x float>, <vscale x 4 x float>* %base_scalable, i64 -1
  %base_scalar = bitcast <vscale x 4 x float>* %base to float*
  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pg, float* %base_scalar)
  ret void
}

define void @st1w_d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i32* %a) {
; CHECK-LABEL: st1w_d_inbound:
; CHECK: st1w { z0.d }, p0, [x0, #1, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i32* %a to <vscale x 2 x i32>*
  %base = getelementptr <vscale x 2 x i32>, <vscale x 2 x i32>* %base_scalable, i64 1
  %base_scalar = bitcast <vscale x 2 x i32>* %base to i32*
  %trunc = trunc <vscale x 2 x i64> %data to <vscale x 2 x i32>
  call void @llvm.aarch64.sve.st1.nxv2i32(<vscale x 2 x i32> %trunc, <vscale x 2 x i1> %pg, i32* %base_scalar)
  ret void
}

;
; ST1D
;

@@ -327,28 +152,6 @@ define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, doub
  ret void
}

define void @st1d_inbound(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %a) {
; CHECK-LABEL: st1d_inbound:
; CHECK: st1d { z0.d }, p0, [x0, #5, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast i64* %a to <vscale x 2 x i64>*
  %base = getelementptr <vscale x 2 x i64>, <vscale x 2 x i64>* %base_scalable, i64 5
  %base_scalar = bitcast <vscale x 2 x i64>* %base to i64*
  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pg, i64* %base_scalar)
  ret void
}

define void @st1d_f64_inbound(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %a) {
; CHECK-LABEL: st1d_f64_inbound:
; CHECK: st1d { z0.d }, p0, [x0, #-8, mul vl]
; CHECK-NEXT: ret
  %base_scalable = bitcast double* %a to <vscale x 2 x double>*
  %base = getelementptr <vscale x 2 x double>, <vscale x 2 x double>* %base_scalable, i64 -8
  %base_scalar = bitcast <vscale x 2 x double>* %base to double*
  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pg, double* %base_scalar)
  ret void
}

declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)

declare void @llvm.aarch64.sve.st1.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i8*)