; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST
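
; The SSE and AVX prefixes split the per-ISA output; the AVX2-SLOW/AVX2-FAST
; prefixes further split the +avx2 runs on the fast-variable-shuffle tuning flag.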

; fold (sra 0, x) -> 0
define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_zero:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_zero:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (sra -1, x) -> -1
define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_allones:
; SSE: # %bb.0:
; SSE-NEXT: pcmpeqd %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_allones:
; AVX: # %bb.0:
; AVX-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  ret <4 x i32> %1
}

; fold (sra x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange0:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange1:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_ashr_outofrange2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange2:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (sra x, 0) -> x
define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_by_zero:
; CHECK: # %bb.0:
; CHECK-NEXT: retq
  %1 = ashr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr0:
; SSE: # %bb.0:
; SSE-NEXT: psrad $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr0:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $6, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr1:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $10, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $6, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $8, %xmm1
; SSE-NEXT: psrad $4, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr1:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}
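
; The summed per-lane amounts (17+25 etc.) all reach the 32-bit element width,
; so only an all-sign-bits shift by 31 remains.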
define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr2:
; SSE: # %bb.0:
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr2:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = ashr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}
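
; Lane amounts that stay below 32 combine normally (5+10 and 27+0 below);
; lanes whose amount goes out of range are poison and currently lower to an
; all-sign-bits shift (psrad $31).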
define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr3:
; SSE: # %bb.0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $27, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrad $15, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_ashr3:
; AVX: # %bb.0:
; AVX-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, <i32 1, i32 5, i32 50, i32 27>
  %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32 0>
  ret <4 x i32> %2
}
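
; SSE4.1 has no per-lane variable dword shift, so the variable-amount tests
; below move each amount into the low element of an XMM count register and
; blend the per-lane results; AVX2 can use vpsravd/vpsrlvd directly.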
; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_ashr_trunc_and:
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT: andps {{.*}}(%rip), %xmm1
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psrad %xmm4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrad %xmm1, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT: psrad %xmm1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_and:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: combine_vec_ashr_trunc_and:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = <0,2,4,6,u,u,u,u>
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FAST-NEXT: vpsravd %xmm1, %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %x, %2
  ret <4 x i32> %3
}

; fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
; if c1 is equal to the number of bits the trunc removes
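; On SSE the trunc-of-srl-by-32 becomes a shufps that keeps the odd dwords,
; after which only the per-lane ashr by <0,1,2,3> remains; AVX2-FAST instead
; selects the odd dwords with vpermd.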
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr:
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: psrad $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: psrad $1, %xmm0
; SSE-NEXT: psrad $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-SLOW-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [1,3,5,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}
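
; Splat case: the 24-bit lshr and 2-bit ashr combine into a single psrad by 26,
; and the trunc to i8 is done with packssdw/packsswb.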
define <16 x i8> @combine_vec_ashr_trunc_lshr_splat(<16 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr_splat:
; SSE: # %bb.0:
; SSE-NEXT: psrad $26, %xmm3
; SSE-NEXT: psrad $26, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psrad $26, %xmm1
; SSE-NEXT: psrad $26, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_lshr_splat:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $26, %ymm1, %ymm1
; AVX-NEXT: vpsrad $26, %ymm0, %ymm0
; AVX-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,1,3]
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <16 x i32> %x, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
  %2 = trunc <16 x i32> %1 to <16 x i8>
  %3 = ashr <16 x i8> %2, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>
  ret <16 x i8> %3
}

; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
; if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr:
; SSE: # %bb.0:
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT: movaps %xmm0, %xmm2
; SSE-NEXT: psrad $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: psrad $1, %xmm0
; SSE-NEXT: psrad $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE-NEXT: movdqa %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-SLOW-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [1,3,5,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT: vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
  %1 = ashr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}
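
; Splat case: the 16-bit and 3-bit arithmetic shifts combine into a single
; psrad by 19 ahead of the packssdw truncation.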
define <8 x i16> @combine_vec_ashr_trunc_ashr_splat(<8 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr_splat:
; SSE: # %bb.0:
; SSE-NEXT: psrad $19, %xmm1
; SSE-NEXT: psrad $19, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_trunc_ashr_splat:
; AVX: # %bb.0:
; AVX-NEXT: vpsrad $19, %ymm0, %ymm0
; AVX-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = ashr <8 x i32> %x, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %2 = trunc <8 x i32> %1 to <8 x i16>
  %3 = ashr <8 x i16> %2, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
  ret <8 x i16> %3
}

; If the sign bit is known to be zero, switch this to a SRL.
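; The mask constants below clear bit 31 in every lane, so the ashr lowers as a
; logical right shift (psrld / vpsrlvd).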
define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive:
; SSE: # %bb.0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: psrld %xmm4, %xmm5
; SSE-NEXT: pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm1, %xmm3
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT: psrld %xmm1, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive:
; AVX: # %bb.0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 255, i32 4095, i32 65535>
  %2 = ashr <4 x i32> %1, %y
  ret <4 x i32> %2
}
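
; Masking to the low 10 bits and then shifting right by 10 leaves no bits, so
; this folds to zero.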
define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive_splat:
; SSE: # %bb.0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_ashr_positive_splat:
; AVX: # %bb.0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 1023, i32 1023, i32 1023, i32 1023>
  %2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %2
}