; RUN: llc -march=aarch64 -aarch64-neon-syntax=generic < %s | FileCheck %s
; Log2 shuffle-and-add reduction over all 16 i8 lanes. ISel should collapse
; the whole tree (4 shuffle+add steps) into one across-lanes ADDV.
define i8 @add_B(<16 x i8>* %arr) {
; CHECK-LABEL: add_B
; CHECK: addv {{b[0-9]+}}, {{v[0-9]+}}.16b
  %bin.rdx = load <16 x i8>, <16 x i8>* %arr
  ; Step 1: fold upper half onto lower half.
  %rdx.shuf0 = shufflevector <16 x i8> %bin.rdx, <16 x i8> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx0 = add <16 x i8> %bin.rdx, %rdx.shuf0
  ; Step 2: fold lanes 4-7 onto 0-3.
  %rdx.shuf = shufflevector <16 x i8> %bin.rdx0, <16 x i8> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx11 = add <16 x i8> %bin.rdx0, %rdx.shuf
  ; Step 3: fold lanes 2-3 onto 0-1.
  %rdx.shuf12 = shufflevector <16 x i8> %bin.rdx11, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx13 = add <16 x i8> %bin.rdx11, %rdx.shuf12
  ; Step 4: fold lane 1 onto lane 0; the sum ends up in element 0.
  %rdx.shuf13 = shufflevector <16 x i8> %bin.rdx13, <16 x i8> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx14 = add <16 x i8> %bin.rdx13, %rdx.shuf13
  %r = extractelement <16 x i8> %bin.rdx14, i32 0
  ret i8 %r
}
; Log2 shuffle-and-add reduction over all 8 i16 lanes. ISel should collapse
; the tree (3 shuffle+add steps) into one across-lanes ADDV.
define i16 @add_H(<8 x i16>* %arr) {
; CHECK-LABEL: add_H
; CHECK: addv {{h[0-9]+}}, {{v[0-9]+}}.8h
  %bin.rdx = load <8 x i16>, <8 x i16>* %arr
  ; Step 1: fold lanes 4-7 onto 0-3.
  %rdx.shuf = shufflevector <8 x i16> %bin.rdx, <8 x i16> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx11 = add <8 x i16> %bin.rdx, %rdx.shuf
  ; Step 2: fold lanes 2-3 onto 0-1.
  %rdx.shuf12 = shufflevector <8 x i16> %bin.rdx11, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx13 = add <8 x i16> %bin.rdx11, %rdx.shuf12
  ; Step 3: fold lane 1 onto lane 0; the sum ends up in element 0.
  %rdx.shuf13 = shufflevector <8 x i16> %bin.rdx13, <8 x i16> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  %bin.rdx14 = add <8 x i16> %bin.rdx13, %rdx.shuf13
  %r = extractelement <8 x i16> %bin.rdx14, i32 0
  ret i16 %r
}
; Log2 shuffle-and-add reduction over all 4 i32 lanes. ISel should collapse
; the tree (2 shuffle+add steps) into one across-lanes ADDV.
define i32 @add_S(<4 x i32>* %arr) {
; CHECK-LABEL: add_S
; CHECK: addv {{s[0-9]+}}, {{v[0-9]+}}.4s
  %bin.rdx = load <4 x i32>, <4 x i32>* %arr
  ; Step 1: fold lanes 2-3 onto 0-1.
  %rdx.shuf = shufflevector <4 x i32> %bin.rdx, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
  %bin.rdx11 = add <4 x i32> %bin.rdx, %rdx.shuf
  ; Step 2: fold lane 1 onto lane 0; the sum ends up in element 0.
  %rdx.shuf12 = shufflevector <4 x i32> %bin.rdx11, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
  %bin.rdx13 = add <4 x i32> %bin.rdx11, %rdx.shuf12
  %r = extractelement <4 x i32> %bin.rdx13, i32 0
  ret i32 %r
}
; Single-step reduction over 2 i64 lanes. Negative test: the combine must
; NOT emit an across-lanes addv here (there is no ADDV form for the v2i64
; case in this test's expectations — only that "addv" never appears).
define i64 @add_D(<2 x i64>* %arr) {
; CHECK-LABEL: add_D
; CHECK-NOT: addv
  %bin.rdx = load <2 x i64>, <2 x i64>* %arr
  ; Fold lane 1 onto lane 0; the sum ends up in element 0.
  %rdx.shuf0 = shufflevector <2 x i64> %bin.rdx, <2 x i64> undef, <2 x i32> <i32 1, i32 undef>
  %bin.rdx0 = add <2 x i64> %bin.rdx, %rdx.shuf0
  %r = extractelement <2 x i64> %bin.rdx0, i32 0
  ret i64 %r
}