; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s 2>%t | FileCheck %s
; RUN: FileCheck --check-prefix=WARN --allow-empty %s <%t

; If this check fails please read test/CodeGen/AArch64/README for instructions on how to resolve it.
; WARN-NOT: warning
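
; The first RUN line redirects any diagnostics on stderr to %t, and the second
; RUN line checks that output; WARN-NOT above asserts no warning was emitted.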

;
; LD1B, LD1W, LD1H, LD1D: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
;   e.g. ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
;
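
; Each intrinsic takes a governing predicate, a scalar base pointer and a
; vector of 32-bit offsets, and returns a scalable vector (see the declares at
; the end of this file). A minimal usage sketch, with hypothetical values
; %pred, %ptr and %offs:
;   %bytes = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pred, i8* %ptr, <vscale x 4 x i32> %offs)
;   %words = zext <vscale x 4 x i8> %bytes to <vscale x 4 x i32>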

; LD1B
define <vscale x 4 x i32> @gld1b_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_uxtw:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1b_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1b_s_sxtw:
; CHECK: ld1b { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1b_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1b_d_uxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1b_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1b_d_sxtw:
; CHECK: ld1b { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1H
define <vscale x 4 x i32> @gld1h_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_uxtw:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1h_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1h_s_sxtw:
; CHECK: ld1h { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b)
  %res = zext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1h_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1h_d_uxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1h_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1h_d_sxtw:
; CHECK: ld1h { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}
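
; Loads whose element type already matches the result element width (i32 into
; 32-bit lanes, i64, float, double) return the loaded vector directly; the
; narrower loads above are widened with an explicit zext.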

; LD1W
define <vscale x 4 x i32> @gld1w_s_uxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @gld1w_s_sxtw(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1> %pg, i32* %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x i32> %load
}

define <vscale x 2 x i64> @gld1w_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1w_d_uxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1w_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1w_d_sxtw:
; CHECK: ld1w { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b)
  %res = zext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 4 x float> @gld1w_s_uxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_uxtw_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

define <vscale x 4 x float> @gld1w_s_sxtw_float(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1w_s_sxtw_float:
; CHECK: ld1w { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1> %pg, float* %base, <vscale x 4 x i32> %b)
  ret <vscale x 4 x float> %load
}

; LD1D
define <vscale x 2 x i64> @gld1d_d_uxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_uxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x i64> @gld1d_d_sxtw(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_sxtw:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1> %pg, i64* %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x i64> %load
}

define <vscale x 2 x double> @gld1d_d_uxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_uxtw_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x double> %load
}

define <vscale x 2 x double> @gld1d_d_sxtw_double(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1d_d_sxtw_double:
; CHECK: ld1d { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1> %pg, double* %base, <vscale x 2 x i32> %b)
  ret <vscale x 2 x double> %load
}

;
; LD1SB, LD1SW, LD1SH: base + 32-bit unscaled offset, sign (sxtw) or zero
; (uxtw) extended to 64 bits.
;   e.g. ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
;
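
; The same intrinsics are used as in the zero-extending tests above; the sign
; extension is expressed with an explicit sext, which a DAG combine (D70812)
; folds into the signed-load instruction. A sketch with hypothetical values
; %pred, %ptr and %offs:
;   %halves = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pred, i16* %ptr, <vscale x 4 x i32> %offs)
;   %words = sext <vscale x 4 x i16> %halves to <vscale x 4 x i32>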

; LD1SB
define <vscale x 4 x i32> @gld1sb_s_uxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sb_s_uxtw:
; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1sb_s_sxtw(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sb_s_sxtw:
; CHECK: ld1sb { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1> %pg, i8* %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i8> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sb_d_uxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sb_d_uxtw:
; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sb_d_sxtw(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sb_d_sxtw:
; CHECK: ld1sb { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1> %pg, i8* %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i8> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SH
define <vscale x 4 x i32> @gld1sh_s_uxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sh_s_uxtw:
; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x i32> @gld1sh_s_sxtw(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b) {
; CHECK-LABEL: gld1sh_s_sxtw:
; CHECK: ld1sh { z0.s }, p0/z, [x0, z0.s, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1> %pg, i16* %base, <vscale x 4 x i32> %b)
  %res = sext <vscale x 4 x i16> %load to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %res
}

define <vscale x 2 x i64> @gld1sh_d_uxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sh_d_uxtw:
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sh_d_sxtw(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sh_d_sxtw:
; CHECK: ld1sh { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1> %pg, i16* %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i16> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1SW
define <vscale x 2 x i64> @gld1sw_d_uxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sw_d_uxtw:
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, uxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x i64> @gld1sw_d_sxtw(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b) {
; CHECK-LABEL: gld1sw_d_sxtw:
; CHECK: ld1sw { z0.d }, p0/z, [x0, z0.d, sxtw]
; CHECK-NEXT: ret
  %load = call <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1> %pg, i32* %base, <vscale x 2 x i32> %b)
  %res = sext <vscale x 2 x i32> %load to <vscale x 2 x i64>
  ret <vscale x 2 x i64> %res
}

; LD1B/LD1SB
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i32>)
declare <vscale x 4 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i8(<vscale x 4 x i1>, i8*, <vscale x 4 x i32>)
declare <vscale x 2 x i8> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i8(<vscale x 2 x i1>, i8*, <vscale x 2 x i32>)

; LD1H/LD1SH
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)
declare <vscale x 4 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i16(<vscale x 4 x i1>, i16*, <vscale x 4 x i32>)
declare <vscale x 2 x i16> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i16(<vscale x 2 x i1>, i16*, <vscale x 2 x i32>)

; LD1W/LD1SW
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4i32(<vscale x 4 x i1>, i32*, <vscale x 4 x i32>)
declare <vscale x 2 x i32> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i32(<vscale x 2 x i1>, i32*, <vscale x 2 x i32>)

declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.sxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.gather.uxtw.nxv4f32(<vscale x 4 x i1>, float*, <vscale x 4 x i32>)

; LD1D
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2i64(<vscale x 2 x i1>, i64*, <vscale x 2 x i32>)

declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.sxtw.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.gather.uxtw.nxv2f64(<vscale x 2 x i1>, double*, <vscale x 2 x i32>)