Implement IR intrinsics for gather prefetch.
Summary:
Intrinsics and the corresponding codegen have been implemented for the following
SVE instructions:
1. PRF<T> <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
2. PRF<T> <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>] -> 32-bit unpacked scaled offset
3. PRF<T> <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
4. PRF<T> <prfop>, <Pg>, [<Zn>.S{, #<imm>}] -> 32-bit element
5. PRF<T> <prfop>, <Pg>, [<Zn>.D{, #<imm>}] -> 64-bit element
The instructions are associated with the following intrinsics, respectively (a worked example follows the list of ACLE functions below):
1. void @llvm.aarch64.sve.gather.prf<T>.scaled.<mod>.nx4vi32(
i8* %base,
<vscale x 4 x i32> %offset,
<vscale x 4 x i1> %Pg,
i32 %prfop)
2. void @llvm.aarch64.sve.gather.prf<T>.scaled.<mod>.nx2vi32(
i8* %base,
<vscale x 2 x i32> %offset,
<vscale x 2 x i1> %Pg,
i32 %prfop)
3. void @llvm.aarch64.sve.gather.prf<T>.scaled.nx2vi64(
i8* %base,
<vscale x 2 x i64> %offset,
<vscale x 2 x i1> %Pg,
i32 %prfop)
4. void @llvm.aarch64.sve.gather.prf<T>.nx4vi32(
<vscale x 4 x i32> %bases,
i64 %imm,
<vscale x 4 x i1> %Pg,
i32 %prfop)
5. void @llvm.aarch64.sve.gather.prf<T>.nx2vi64(
<vscale x 2 x i64> %bases,
i64 %imm,
<vscale x 2 x i1> %Pg,
i32 %prfop)
The intrinsics are the IR counterparts of the following SVE ACLE functions:
* void svprf<T>(svbool_t pg, const void *base, svprfop op)
* void svprf<T>_vnum(svbool_t pg, const void *base, int64_t vnum, svprfop op)
* void svprf<T>_gather[_u32base](svbool_t pg, svuint32_t bases, svprfop op)
* void svprf<T>_gather[_u64base](svbool_t pg, svuint64_t bases, svprfop op)
* void svprf<T>_gather_[s32]offset(svbool_t pg, const void *base, svint32_t offsets, svprfop op)
* void svprf<T>_gather_[u32]offset(svbool_t pg, const void *base, svint32_t offsets, svprfop op)
* void svprf<T>_gather_[s64]offset(svbool_t pg, const void *base, svint64_t offsets, svprfop op)
* void svprf<T>_gather_[u64]offset(svbool_t pg, const void *base, svint64_t offsets, svprfop op)
* void svprf<T>_gather[_u32base]_offset(svbool_t pg, svuint32_t bases, int64_t offset, svprfop op)
* void svprf<T>_gather[_u64base]_offset(svbool_t pg, svuint64_t bases, int64_t offset, svprfop op)
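For example (an illustrative sketch, not part of the original summary), a
prfb instance of intrinsic 1. invoked with prfop = 1 (pldl1strm), using the
operand order exercised by the regression tests:
  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(
      <vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
selects the 32-bit scaled-offset instruction form:
  prfb pldl1strm, p0, [x0, z0.s, uxtw]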
Reviewers: andwar, sdesmalen, efriedma, rengolin
Subscribers: tschuett, hiraditya, rkruppe, psnobl, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D75580
; RUN: llc -mtriple=aarch64--linux-gnu -mattr=+sve --asm-verbose=false < %s | FileCheck %s

; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}
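; Note (explanatory comment, not from the original file): the trailing i32
; immediate in these calls is the SVE prefetch operation (prfop). The value 1
; encodes PLDL1STRM, which is why every CHECK line in this file expects
; "pldl1strm".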
; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod>] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}
; PRFB <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_prfb_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfb_gather_scaled_nx2vi64:
; CHECK-NEXT: prfb pldl1strm, p0, [x0, z0.d]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
  ret void
}
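; Note (explanatory comment, not from the original file): prfb prefetches with
; byte granularity, so its vector offsets are used unscaled and no shift
; appears in the addressing mode. The prfh and prfw tests below expect
; uxtw/sxtw/lsl #1 and #2 respectively, because halfword and word prefetches
; scale the offsets by the element size.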
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.s, sxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}
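; Note (explanatory comment, not from the original file): within each section
; the uxtw/sxtw pair corresponds to the unsigned [u32]offset and signed
; [s32]offset ACLE gather-prefetch variants from the summary above; the two
; tests differ only in how the 32-bit offsets are extended to 64 bits.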
; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #1] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, uxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, sxtw #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}
; PRFH <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_prfh_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfh_gather_scaled_nx2vi64:
; CHECK-NEXT: prfh pldl1strm, p0, [x0, z0.d, lsl #1]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
  ret void
}
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.s, sxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #2] -> 32-bit unpacked scaled offset
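; The 32-bit offsets occupy the low half of each 64-bit lane of <Zm>.D and are
; zero- or sign-extended (uxtw/sxtw) before being scaled.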
define void @llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, uxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, sxtw #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}

; PRFW <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
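; With native 64-bit offsets no extension is needed; the scaling appears as
; lsl #2 in the addressing mode.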
define void @llvm_aarch64_sve_prfw_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfw_gather_scaled_nx2vi64:
; CHECK-NEXT: prfw pldl1strm, p0, [x0, z0.d, lsl #2]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
  ret void
}

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.S, <mod>] -> 32-bit scaled offset
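; prfd prefetches doublewords, so offsets are scaled by #3 (log2 of the
; 8-byte element size) instead of #2.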
define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx4vi32:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx4vi32:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.s, sxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D, <mod> #3] -> 32-bit unpacked scaled offset
define void @llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_uxtw_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, uxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}

define void @llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_sxtw_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, sxtw #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 1)
  ret void
}

; PRFD <prfop>, <Pg>, [<Xn|SP>, <Zm>.D] -> 64-bit scaled offset
define void @llvm_aarch64_sve_prfd_gather_scaled_nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset) nounwind {
; CHECK-LABEL: llvm_aarch64_sve_prfd_gather_scaled_nx2vi64:
; CHECK-NEXT: prfd pldl1strm, p0, [x0, z0.d, lsl #3]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 1)
  ret void
}

declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfb.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)

declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfh.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)

declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfw.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)

declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx4vi32(<vscale x 4 x i1> %Pg, i8* %base, <vscale x 4 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scaled.uxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scaled.sxtw.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i32> %offset, i32 %prfop)
declare void @llvm.aarch64.sve.prfd.gather.scaled.nx2vi64(<vscale x 2 x i1> %Pg, i8* %base, <vscale x 2 x i64> %offset, i32 %prfop)