; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning

;
; Masked Loads
;
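; The tests below check that a masked load of a narrow scalable vector that is
; immediately sign-extended is lowered to a single sign-extending predicated
; load (ld1sb, ld1sh or ld1sw) governed by the incoming mask, rather than a
; plain load followed by a separate extend.
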
define <vscale x 2 x i64> @masked_sload_nxv2i8(<vscale x 2 x i8> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i8:
; CHECK: ld1sb { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
  %ext = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

define <vscale x 2 x i64> @masked_sload_nxv2i16(<vscale x 2 x i16> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i16:
; CHECK: ld1sh { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
  %ext = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

define <vscale x 2 x i64> @masked_sload_nxv2i32(<vscale x 2 x i32> *%a, <vscale x 2 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv2i32:
; CHECK: ld1sw { [[IN:z[0-9]+]].d }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32> *%a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
  %ext = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %ext
}

define <vscale x 4 x i32> @masked_sload_nxv4i8(<vscale x 4 x i8> *%a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv4i8:
; CHECK: ld1sb { [[IN:z[0-9]+]].s }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
  %ext = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}

define <vscale x 4 x i32> @masked_sload_nxv4i16(<vscale x 4 x i16> *%a, <vscale x 4 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv4i16:
; CHECK: ld1sh { [[IN:z[0-9]+]].s }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16> *%a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
  %ext = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %ext
}

define <vscale x 8 x i16> @masked_sload_nxv8i8(<vscale x 8 x i8> *%a, <vscale x 8 x i1> %mask) {
; CHECK-LABEL: masked_sload_nxv8i8:
; CHECK: ld1sb { [[IN:z[0-9]+]].h }, [[PG:p[0-9]+]]/z, [x0]
; CHECK-NEXT: ret
  %load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8> *%a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
  %ext = sext <vscale x 8 x i8> %load to <vscale x 8 x i16>
  ret <vscale x 8 x i16> %ext
}

declare <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>*, i32, <vscale x 2 x i1>, <vscale x 2 x i8>)
declare <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>*, i32, <vscale x 2 x i1>, <vscale x 2 x i16>)
declare <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>*, i32, <vscale x 2 x i1>, <vscale x 2 x i32>)
declare <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>*, i32, <vscale x 4 x i1>, <vscale x 4 x i8>)
declare <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>*, i32, <vscale x 4 x i1>, <vscale x 4 x i16>)
declare <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>*, i32, <vscale x 8 x i1>, <vscale x 8 x i8>)