; RUN: llc < %s -mtriple=aarch64-eabi -aarch64-neon-syntax=generic | FileCheck %s

; This file checks that the final "reduce" step of a vectorized add
; reduction is lowered to a single across-lanes addv instruction, i.e. that
; a sequence such as
;   ext v1.16b, v0.16b, v0.16b, #8
;   add v0.4s, v1.4s, v0.4s
;   dup v1.4s, v0.s[1]
;   add v0.4s, v1.4s, v0.4s
; combines into
;   addv s0, v0.4s
; See PR21371 and http://reviews.llvm.org/D12325.
; Function Attrs: nounwind readnone
declare i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64>)
declare i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32>)
declare i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16>)
declare i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8>)
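
; The functions below each reduce one full 128-bit vector; every case except
; the v2i64 one should lower to a single across-lanes addv (see the CHECK
; lines in each function).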
define i8 @add_B(<16 x i8>* %arr) {
; CHECK-LABEL: add_B
; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.16b
%bin.rdx = load <16 x i8>, <16 x i8>* %arr
%r = call i8 @llvm.experimental.vector.reduce.add.v16i8(<16 x i8> %bin.rdx)
ret i8 %r
}

define i16 @add_H(<8 x i16>* %arr) {
; CHECK-LABEL: add_H
; CHECK: addv {{h[0-9]+}}, {{v[0-9]+}}.8h
%bin.rdx = load <8 x i16>, <8 x i16>* %arr
%r = call i16 @llvm.experimental.vector.reduce.add.v8i16(<8 x i16> %bin.rdx)
ret i16 %r
}

define i32 @add_S(<4 x i32>* %arr) {
; CHECK-LABEL: add_S
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
%bin.rdx = load <4 x i32>, <4 x i32>* %arr
%r = call i32 @llvm.experimental.vector.reduce.add.v4i32(<4 x i32> %bin.rdx)
ret i32 %r
}

define i64 @add_D(<2 x i64>* %arr) {
; CHECK-LABEL: add_D
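; There is no addv with a 64-bit element arrangement, and with only two
; lanes the reduction is a single add anyway, so addv must not appear here.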
; CHECK-NOT: addv
%bin.rdx = load <2 x i64>, <2 x i64>* %arr
%r = call i64 @llvm.experimental.vector.reduce.add.v2i64(<2 x i64> %bin.rdx)
ret i64 %r
}

declare i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32>)
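
; Computes a sum of absolute differences over two <8 x i8> inputs that are
; zero-extended to <8 x i32>. The reduction input is wider than 128 bits, so
; the two <4 x i32> halves are expected to be added together first, leaving
; a single 4s addv (per the CHECK line below).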
define i32 @oversized_ADDV_256(i8* noalias nocapture readonly %arg1, i8* noalias nocapture readonly %arg2) {
; CHECK-LABEL: oversized_ADDV_256
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
entry:
%0 = bitcast i8* %arg1 to <8 x i8>*
%1 = load <8 x i8>, <8 x i8>* %0, align 1
%2 = zext <8 x i8> %1 to <8 x i32>
%3 = bitcast i8* %arg2 to <8 x i8>*
%4 = load <8 x i8>, <8 x i8>* %3, align 1
%5 = zext <8 x i8> %4 to <8 x i32>
%6 = sub nsw <8 x i32> %2, %5
%7 = icmp slt <8 x i32> %6, zeroinitializer
%8 = sub nsw <8 x i32> zeroinitializer, %6
%9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
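; %7..%9 compute the lane-wise absolute value of %6 using the
; negate-and-select idiom: select (x < 0), 0 - x, x.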
%r = call i32 @llvm.experimental.vector.reduce.add.v8i32(<8 x i32> %9)
ret i32 %r
}

declare i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32>)
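
; A 512-bit <16 x i32> reduction: the input is expected to be narrowed by
; 128-bit vector adds until one <4 x i32> remains, finishing in a single
; 4s addv (per the CHECK line below).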
define i32 @oversized_ADDV_512(<16 x i32>* %arr) {
; CHECK-LABEL: oversized_ADDV_512
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
%bin.rdx = load <16 x i32>, <16 x i32>* %arr
%r = call i32 @llvm.experimental.vector.reduce.add.v16i32(<16 x i32> %bin.rdx)
ret i32 %r
}