; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX2-FAST
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefix=AVX512 --check-prefix=AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL

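; Plain truncation of <8 x i64> to <8 x i32>: SSE and AVX select the even
; 32-bit lanes with a single shufps per pair of registers; AVX512 does the
; whole thing in one vpmovqd.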
define <8 x i32> @trunc8i64_8i32(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i32:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i32:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: retq
entry:
  %0 = trunc <8 x i64> %a to <8 x i32>
  ret <8 x i32> %0
}

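; Arithmetic shift right by 32 then truncate: only the high 32-bit halves
; survive, so the shuffles pick the odd lanes [1,3] and no shift is needed
; pre-AVX512; AVX512 keeps the vpsraq+vpmovqd pair.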
define <8 x i32> @trunc8i64_8i32_ashr(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i32_ashr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32_ashr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i32_ashr:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i32_ashr:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [1,3,5,7,5,7,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i32_ashr:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsraq $32, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: retq
entry:
  %0 = ashr <8 x i64> %a, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %1 = trunc <8 x i64> %0 to <8 x i32>
  ret <8 x i32> %1
}

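; Logical shift right by 32 then truncate: same odd-lane selection on
; SSE/AVX1; AVX2 shifts first with vpsrlq, and AVX512 pairs vpsrlq with
; vpmovqd.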
define <8 x i32> @trunc8i64_8i32_lshr(<8 x i64> %a) {
; SSE-LABEL: trunc8i64_8i32_lshr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm3[1,3]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i32_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i32_lshr:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i32_lshr:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vpsrlq $32, %ymm1, %ymm1
; AVX2-FAST-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i32_lshr:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrlq $32, %zmm0, %zmm0
; AVX512-NEXT: vpmovqd %zmm0, %ymm0
; AVX512-NEXT: retq
entry:
  %0 = lshr <8 x i64> %a, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %1 = trunc <8 x i64> %0 to <8 x i32>
  ret <8 x i32> %1
}

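; <8 x i64> to <8 x i16>: SSE2/SSSE3 need a long pshufd/pshuflw chain,
; SSE4.1 blends with zero and packs, AVX1 masks and packs; AVX512 has the
; single-instruction vpmovqw.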
define <8 x i16> @trunc8i64_8i16(<8 x i64> %a) {
; SSE2-LABEL: trunc8i64_8i16:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i64_8i16:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i64_8i16:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1,2,3],xmm3[4],xmm4[5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1,2,3],xmm2[4],xmm4[5,6,7]
; SSE41-NEXT: packusdw %xmm3, %xmm2
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1,2,3],xmm1[4],xmm4[5,6,7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1,2,3],xmm0[4],xmm4[5,6,7]
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: packusdw %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535]
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i16:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-SLOW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i16:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-FAST-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovqw %zmm0, %xmm0
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = trunc <8 x i64> %a to <8 x i16>
  ret <8 x i16> %0
}

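; <8 x i64> to <8 x i8>, stored to memory: mask with 255 and pack down in
; stages on SSE/AVX; AVX512 truncates directly to memory with vpmovqb.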
define void @trunc8i64_8i8(<8 x i64> %a) {
; SSE2-LABEL: trunc8i64_8i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i64_8i8:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSSE3-NEXT: pand %xmm4, %xmm3
; SSSE3-NEXT: pand %xmm4, %xmm2
; SSSE3-NEXT: packuswb %xmm3, %xmm2
; SSSE3-NEXT: pand %xmm4, %xmm1
; SSSE3-NEXT: pand %xmm4, %xmm0
; SSSE3-NEXT: packuswb %xmm1, %xmm0
; SSSE3-NEXT: packuswb %xmm2, %xmm0
; SSSE3-NEXT: packuswb %xmm0, %xmm0
; SSSE3-NEXT: movq %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i64_8i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0]
; SSE41-NEXT: pand %xmm4, %xmm3
; SSE41-NEXT: pand %xmm4, %xmm2
; SSE41-NEXT: packusdw %xmm3, %xmm2
; SSE41-NEXT: pand %xmm4, %xmm1
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: packusdw %xmm2, %xmm0
; SSE41-NEXT: packuswb %xmm0, %xmm0
; SSE41-NEXT: movq %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i64_8i8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255]
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovq %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc8i64_8i8:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-SLOW-NEXT: vmovq %xmm0, (%rax)
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc8i64_8i8:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX2-FAST-NEXT: vmovq %xmm0, (%rax)
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512-LABEL: trunc8i64_8i8:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovqb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = trunc <8 x i64> %a to <8 x i8>
  store <8 x i8> %0, <8 x i8>* undef, align 4
  ret void
}

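; <8 x i32> to <8 x i16>: SSE2 uses pslld+psrad+packssdw, SSSE3/SSE4.1/AVX1
; use pshufb; AVX512VL/BWVL truncate with a single vpmovdw.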
define <8 x i16> @trunc8i32_8i16(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i16:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i32_8i16:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i32_8i16:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <8 x i32> %a to <8 x i16>
  ret <8 x i16> %0
}

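; ashr by 16 then truncate: psrad+packssdw is exact here because the shifted
; values always fit in a signed i16, so the saturating pack never clamps.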
define <8 x i16> @trunc8i32_8i16_ashr(<8 x i32> %a) {
; SSE-LABEL: trunc8i32_8i16_ashr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i16_ashr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i16_ashr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackssdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16_ashr:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16_ashr:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16_ashr:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16_ashr:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = ashr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %1 = trunc <8 x i32> %0 to <8 x i16>
  ret <8 x i16> %1
}

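; lshr by 16 then truncate: SSE2 reuses the arithmetic psrad+packssdw
; sequence (the shifted value fits in i16, so the pack is lossless and the
; low 16 bits match the lshr result); SSE4.1 and AVX use psrld+packusdw.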
define <8 x i16> @trunc8i32_8i16_lshr(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i16_lshr:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i32_8i16_lshr:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [2,3,6,7,10,11,14,15,10,11,14,15,14,15,255,255]
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i32_8i16_lshr:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrld $16, %xmm1
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i16_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i16_lshr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackusdw %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i16_lshr:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i16_lshr:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i16_lshr:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i16_lshr:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = lshr <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %1 = trunc <8 x i32> %0 to <8 x i16>
  ret <8 x i16> %1
}

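; <8 x i32> to <8 x i8>, stored to memory: byte masks plus packs, or pshufb
; lane extraction; AVX512VL/BWVL store directly with vpmovdb.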
define void @trunc8i32_8i8(<8 x i32> %a) {
; SSE2-LABEL: trunc8i32_8i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i32_8i8:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movq %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i32_8i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-NEXT: movq %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc8i32_8i8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = <0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u>
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; AVX1-NEXT: vmovq %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc8i32_8i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vmovq %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc8i32_8i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512F-NEXT: vmovq %xmm0, (%rax)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc8i32_8i8:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovdb %ymm0, (%rax)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc8i32_8i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512BW-NEXT: vmovq %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc8i32_8i8:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovdb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <8 x i32> %a to <8 x i8>
  store <8 x i8> %0, <8 x i8>* undef, align 4
  ret void
}

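; <16 x i32> to <16 x i16>, stored to memory: one pack sequence per 256-bit
; half on SSE/AVX; AVX512 stores with one vpmovdw.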
define void @trunc16i32_16i16(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i16:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pslld $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: pslld $16, %xmm0
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: pslld $16, %xmm3
; SSE2-NEXT: psrad $16, %xmm3
; SSE2-NEXT: pslld $16, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm3, %xmm2
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i16:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pslld $16, %xmm1
; SSSE3-NEXT: psrad $16, %xmm1
; SSSE3-NEXT: pslld $16, %xmm0
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: packssdw %xmm1, %xmm0
; SSSE3-NEXT: pslld $16, %xmm3
; SSSE3-NEXT: psrad $16, %xmm3
; SSSE3-NEXT: pslld $16, %xmm2
; SSSE3-NEXT: psrad $16, %xmm2
; SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSSE3-NEXT: movdqu %xmm2, (%rax)
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i16:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pxor %xmm4, %xmm4
; SSE41-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0],xmm4[1],xmm1[2],xmm4[3],xmm1[4],xmm4[5],xmm1[6],xmm4[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0],xmm4[1],xmm0[2],xmm4[3],xmm0[4],xmm4[5],xmm0[6],xmm4[7]
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0],xmm4[1],xmm3[2],xmm4[3],xmm3[4],xmm4[5],xmm3[6],xmm4[7]
; SSE41-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0],xmm4[1],xmm2[2],xmm4[3],xmm2[4],xmm4[5],xmm2[6],xmm4[7]
; SSE41-NEXT: packusdw %xmm3, %xmm2
; SSE41-NEXT: movdqu %xmm2, (%rax)
; SSE41-NEXT: movdqu %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [65535,65535,65535,65535,65535,65535,65535,65535]
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i16:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i16:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = trunc <16 x i32> %a to <16 x i16>
  store <16 x i16> %0, <16 x i16>* undef, align 4
  ret void
}

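; ashr by 16 then truncate to i16: note that AVX512 can use the logical
; vpsrld instead of an arithmetic shift, since vpmovdw only keeps the low
; 16 bits of each shifted lane.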
define void @trunc16i32_16i16_ashr(<16 x i32> %a) {
; SSE-LABEL: trunc16i32_16i16_ashr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $16, %xmm3
; SSE-NEXT: psrad $16, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movdqu %xmm2, (%rax)
; SSE-NEXT: movdqu %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i16_ashr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $16, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i16_ashr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $16, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $16, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i16_ashr:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = ashr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %1 = trunc <16 x i32> %0 to <16 x i16>
  store <16 x i16> %1, <16 x i16>* undef, align 4
  ret void
}

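; lshr by 16 then truncate to i16: same lane extraction; SSE2/SSSE3 again
; get away with psrad because packssdw preserves in-range values
; bit-for-bit.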
define void @trunc16i32_16i16_lshr(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i16_lshr:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: packssdw %xmm1, %xmm0
; SSE2-NEXT: psrad $16, %xmm3
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: packssdw %xmm3, %xmm2
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i16_lshr:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrad $16, %xmm1
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: packssdw %xmm1, %xmm0
; SSSE3-NEXT: psrad $16, %xmm3
; SSSE3-NEXT: psrad $16, %xmm2
; SSSE3-NEXT: packssdw %xmm3, %xmm2
; SSSE3-NEXT: movdqu %xmm2, (%rax)
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i16_lshr:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrld $16, %xmm3
; SSE41-NEXT: psrld $16, %xmm2
; SSE41-NEXT: packusdw %xmm3, %xmm2
; SSE41-NEXT: psrld $16, %xmm1
; SSE41-NEXT: psrld $16, %xmm0
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: movdqu %xmm2, (%rax)
; SSE41-NEXT: movdqu %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i16_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i16_lshr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i16_lshr:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $16, %zmm0, %zmm0
; AVX512-NEXT: vpmovdw %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = lshr <16 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %1 = trunc <16 x i32> %0 to <16 x i16>
  store <16 x i16> %1, <16 x i16>* undef, align 4
  ret void
}

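; <16 x i32> to <16 x i8>, stored to memory: mask-and-pack chains on
; SSE/AVX; AVX512 is a single vpmovdb store.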
define void @trunc16i32_16i8(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i8:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSSE3-NEXT: pand %xmm4, %xmm3
; SSSE3-NEXT: pand %xmm4, %xmm2
; SSSE3-NEXT: packuswb %xmm3, %xmm2
; SSSE3-NEXT: pand %xmm4, %xmm1
; SSSE3-NEXT: pand %xmm4, %xmm0
; SSSE3-NEXT: packuswb %xmm1, %xmm0
; SSSE3-NEXT: packuswb %xmm2, %xmm0
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = [255,0,0,0,255,0,0,0,255,0,0,0,255,0,0,0]
; SSE41-NEXT: pand %xmm4, %xmm3
; SSE41-NEXT: pand %xmm4, %xmm2
; SSE41-NEXT: packusdw %xmm3, %xmm2
; SSE41-NEXT: pand %xmm4, %xmm1
; SSE41-NEXT: pand %xmm4, %xmm0
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: movdqu %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255]
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackusdw %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
; AVX2-NEXT: vpshufb %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX2-NEXT: vmovdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX2-NEXT: vpand %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i8:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = trunc <16 x i32> %a to <16 x i8>
  store <16 x i8> %0, <16 x i8>* undef, align 4
  ret void
}

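; ashr by 24 then truncate to i8: the signed packs (packssdw/packsswb) are
; exact because psrad $24 leaves every lane in [-128,127]; AVX512 again
; substitutes the logical vpsrld before vpmovdb.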
define void @trunc16i32_16i8_ashr(<16 x i32> %a) {
; SSE-LABEL: trunc16i32_16i8_ashr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: psrad $24, %xmm1
; SSE-NEXT: psrad $24, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: psrad $24, %xmm3
; SSE-NEXT: psrad $24, %xmm2
; SSE-NEXT: packssdw %xmm3, %xmm2
; SSE-NEXT: packsswb %xmm2, %xmm0
; SSE-NEXT: movdqu %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i8_ashr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX1-NEXT: vpackssdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm2, %xmm2
; AVX1-NEXT: vpsrad $24, %xmm1, %xmm1
; AVX1-NEXT: vpackssdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i8_ashr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrad $24, %ymm1, %ymm1
; AVX2-NEXT: vpsrad $24, %ymm0, %ymm0
; AVX2-NEXT: vpackssdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i8_ashr:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = ashr <16 x i32> %a, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
  %1 = trunc <16 x i32> %0 to <16 x i8>
  store <16 x i8> %1, <16 x i8>* undef, align 4
  ret void
}

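; lshr by 24 then truncate to i8: the unsigned variant; SSE2 can packuswb
; directly because psrld $24 leaves each 32-bit lane in [0,255], so the
; unsigned packs are lossless.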
define void @trunc16i32_16i8_lshr(<16 x i32> %a) {
; SSE2-LABEL: trunc16i32_16i8_lshr:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: psrld $24, %xmm1
; SSE2-NEXT: psrld $24, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: psrld $24, %xmm3
; SSE2-NEXT: psrld $24, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i32_16i8_lshr:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: psrld $24, %xmm1
; SSSE3-NEXT: psrld $24, %xmm0
; SSSE3-NEXT: packuswb %xmm1, %xmm0
; SSSE3-NEXT: psrld $24, %xmm3
; SSSE3-NEXT: psrld $24, %xmm2
; SSSE3-NEXT: packuswb %xmm3, %xmm2
; SSSE3-NEXT: packuswb %xmm2, %xmm0
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i32_16i8_lshr:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: psrld $24, %xmm1
; SSE41-NEXT: psrld $24, %xmm0
; SSE41-NEXT: packusdw %xmm1, %xmm0
; SSE41-NEXT: psrld $24, %xmm3
; SSE41-NEXT: psrld $24, %xmm2
; SSE41-NEXT: packusdw %xmm3, %xmm2
; SSE41-NEXT: packuswb %xmm2, %xmm0
; SSE41-NEXT: movdqu %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i32_16i8_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpsrld $24, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $24, %xmm0, %xmm0
; AVX1-NEXT: vpackusdw %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vpsrld $24, %xmm2, %xmm2
; AVX1-NEXT: vpsrld $24, %xmm1, %xmm1
; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i32_16i8_lshr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrld $24, %ymm1, %ymm1
; AVX2-NEXT: vpsrld $24, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: trunc16i32_16i8_lshr:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpsrld $24, %zmm0, %zmm0
; AVX512-NEXT: vpmovdb %zmm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
entry:
  %0 = lshr <16 x i32> %a, <i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24, i32 24>
  %1 = trunc <16 x i32> %0 to <16 x i8>
  store <16 x i8> %1, <16 x i8>* undef, align 4
  ret void
}

; PR25684
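; NOTE: vpmovwb is an AVX512BW instruction, so the AVX512F/AVX512VL paths
; below instead widen the words to dwords (vpmovzxwd) and truncate those
; with vpmovdb.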
define void @trunc16i16_16i8(<16 x i16> %a) {
; SSE2-LABEL: trunc16i16_16i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc16i16_16i8:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc16i16_16i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: movdqu %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc16i16_16i8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vandps {{.*}}(%rip), %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i16_16i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpand {{.*}}(%rip), %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc16i16_16i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpmovdb %zmm0, (%rax)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <16 x i16> %a to <16 x i8>
  store <16 x i8> %0, <16 x i8>* undef, align 4
  ret void
}
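
; NOTE: after psraw $8 every word holds a sign-extended byte in [-128,127],
; exactly the saturation range of packsswb, so the signed pack in the next
; function is lossless.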
define void @trunc16i16_16i8_ashr(<16 x i16> %a) {
; SSE-LABEL: trunc16i16_16i8_ashr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: psraw $8, %xmm1
; SSE-NEXT: psraw $8, %xmm0
; SSE-NEXT: packsswb %xmm1, %xmm0
; SSE-NEXT: movdqu %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i16_16i8_ashr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsraw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $8, %xmm0, %xmm0
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i16_16i8_ashr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc16i16_16i8_ashr:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8_ashr:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpmovdb %zmm0, (%rax)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8_ashr:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsraw $8, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8_ashr:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
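; NOTE: AVX512BWVL uses a logical vpsrlw here even though the IR shift is
; arithmetic: vpmovwb keeps only bits 7:0 of each word, which do not depend
; on whether the shift fills with sign or zero bits.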
entry:
  %0 = ashr <16 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %1 = trunc <16 x i16> %0 to <16 x i8>
  store <16 x i8> %1, <16 x i8>* undef, align 4
  ret void
}
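
; NOTE: in the lshr variant below the shifted words are already in [0,255],
; so the unsigned saturation of packuswb can never trigger and the pack is
; again lossless.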
define void @trunc16i16_16i8_lshr(<16 x i16> %a) {
; SSE-LABEL: trunc16i16_16i8_lshr:
; SSE: # %bb.0: # %entry
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: movdqu %xmm0, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc16i16_16i8_lshr:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc16i16_16i8_lshr:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vmovdqu %xmm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc16i16_16i8_lshr:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, (%rax)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc16i16_16i8_lshr:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpmovdb %zmm0, (%rax)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc16i16_16i8_lshr:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vmovdqu %xmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc16i16_16i8_lshr:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = lshr <16 x i16> %a, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %1 = trunc <16 x i16> %0 to <16 x i8>
  store <16 x i8> %1, <16 x i8>* undef, align 4
  ret void
}
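
; NOTE: with AVX512BW a single vpmovwb %zmm0 truncates and stores all 32
; words of the next test at once; without it each ymm half is widened with
; vpmovzxwd, narrowed with vpmovdb, and the two xmm results recombined.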
define void @trunc32i16_32i8(<32 x i16> %a) {
; SSE2-LABEL: trunc32i16_32i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm4, %xmm1
; SSE2-NEXT: pand %xmm4, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: pand %xmm4, %xmm3
; SSE2-NEXT: pand %xmm4, %xmm2
; SSE2-NEXT: packuswb %xmm3, %xmm2
; SSE2-NEXT: movdqu %xmm2, (%rax)
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc32i16_32i8:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm4, %xmm1
; SSSE3-NEXT: pshufb %xmm4, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: pshufb %xmm4, %xmm3
; SSSE3-NEXT: pshufb %xmm4, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSSE3-NEXT: movdqu %xmm2, (%rax)
; SSSE3-NEXT: movdqu %xmm0, (%rax)
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc32i16_32i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm4, %xmm1
; SSE41-NEXT: pshufb %xmm4, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: pshufb %xmm4, %xmm3
; SSE41-NEXT: pshufb %xmm4, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE41-NEXT: movdqu %xmm2, (%rax)
; SSE41-NEXT: movdqu %xmm0, (%rax)
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc32i16_32i8:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vmovaps {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX1-NEXT: vandps %ymm2, %ymm1, %ymm1
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
; AVX1-NEXT: vpackuswb %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vandps %ymm2, %ymm0, %ymm0
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovups %ymm0, (%rax)
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-LABEL: trunc32i16_32i8:
; AVX2: # %bb.0: # %entry
; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX2-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1,2,3],ymm1[4,5,6,7]
; AVX2-NEXT: vpackuswb %ymm0, %ymm2, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: trunc32i16_32i8:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, %xmm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm1, %xmm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc32i16_32i8:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpmovdb %zmm0, %xmm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpmovdb %zmm1, %xmm1
; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: vmovdqu %ymm0, (%rax)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc32i16_32i8:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vpmovwb %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc32i16_32i8:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovwb %zmm0, (%rax)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <32 x i16> %a to <32 x i8>
  store <32 x i8> %0, <32 x i8>* undef, align 4
  ret void
}
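
; NOTE: the AVX2-SLOW/AVX2-FAST split below comes from the
; +fast-variable-shuffle attribute: targets with cheap lane-crossing
; shuffles use one vpermps per source, the others extract the high half
; and merge with vshufps.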
define <8 x i32> @trunc2x4i64_8i32(<4 x i64> %a, <4 x i64> %b) {
; SSE-LABEL: trunc2x4i64_8i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE-NEXT: movaps %xmm2, %xmm1
; SSE-NEXT: retq
;
; AVX1-LABEL: trunc2x4i64_8i32:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc2x4i64_8i32:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc2x4i64_8i32:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vmovaps {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermps %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermps %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc2x4i64_8i32:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i64_8i32:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512VL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512VL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i64_8i32:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i64_8i32:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512BWVL-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm0
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <4 x i64> %a to <4 x i32>
  %1 = trunc <4 x i64> %b to <4 x i32>
  %2 = shufflevector <4 x i32> %0, <4 x i32> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i32> %2
}
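
; NOTE: in the AVX512BWVL version of the next function, vpermi2w does the
; final combine as a single two-source word permute: the destination is
; preloaded with the index vector and overwritten with words gathered from
; the other two operands.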
define <8 x i16> @trunc2x4i64_8i16(<4 x i64> %a, <4 x i64> %b) {
; SSE2-LABEL: trunc2x4i64_8i16:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc2x4i64_8i16:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[0,2,2,3,4,5,6,7]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm3[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,1,0,2,4,5,6,7]
; SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSSE3-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,1,0,2,4,5,6,7]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: movsd {{.*#+}} xmm0 = xmm4[0],xmm0[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc2x4i64_8i16:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[0,1,0,2,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,0,2,4,5,6,7]
; SSE41-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE41-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE41-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE41-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT: retq
;
; AVX1-LABEL: trunc2x4i64_8i16:
; AVX1: # %bb.0: # %entry
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX1-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX1-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX1-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: trunc2x4i64_8i16:
; AVX2-SLOW: # %bb.0: # %entry
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm0, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT: vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: trunc2x4i64_8i16:
; AVX2-FAST: # %bb.0: # %entry
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0
; AVX2-FAST-NEXT: vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: trunc2x4i64_8i16:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovqd %zmm0, %ymm0
; AVX512F-NEXT: vpmovqd %zmm1, %ymm1
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i64_8i16:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vpmovqd %ymm0, %xmm0
; AVX512VL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i64_8i16:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovqd %zmm0, %ymm0
; AVX512BW-NEXT: vpmovqd %zmm1, %ymm1
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i64_8i16:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vpmovqd %ymm0, %xmm2
; AVX512BWVL-NEXT: vpmovqd %ymm1, %xmm1
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm0 = [0,2,4,6,8,10,12,14]
; AVX512BWVL-NEXT: vpermi2w %xmm1, %xmm2, %xmm0
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <4 x i64> %a to <4 x i16>
  %1 = trunc <4 x i64> %b to <4 x i16>
  %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %2
}

define <4 x i32> @trunc2x2i64_4i32(<2 x i64> %a, <2 x i64> %b) {
; SSE-LABEL: trunc2x2i64_4i32:
; SSE: # %bb.0: # %entry
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: retq
;
; AVX-LABEL: trunc2x2i64_4i32:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc2x2i64_4i32:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX512-NEXT: retq
entry:
  %0 = trunc <2 x i64> %a to <2 x i32>
  %1 = trunc <2 x i64> %b to <2 x i32>
  %2 = shufflevector <2 x i32> %0, <2 x i32> %1, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %2
}

define i64 @trunc2i64_i64(<2 x i64> %inval) {
; SSE-LABEL: trunc2i64_i64:
; SSE: # %bb.0: # %entry
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: movq %xmm0, %rax
; SSE-NEXT: retq
;
; AVX-LABEL: trunc2i64_i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc2i64_i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: retq
entry:
  %0 = trunc <2 x i64> %inval to <2 x i32>
  %1 = bitcast <2 x i32> %0 to i64
  ret i64 %1
}
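
; NOTE: the AVX512BWVL version of the next function uses the vpermt2w form
; instead: the destination doubles as the first data table and the indices
; come from the middle operand, so the result lands in %xmm0 with no extra
; move.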
define <8 x i16> @trunc2x4i32_8i16(<4 x i32> %a, <4 x i32> %b) {
; SSE2-LABEL: trunc2x4i32_8i16:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc2x4i32_8i16:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc2x4i32_8i16:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc2x4i32_8i16:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
;
; AVX512F-LABEL: trunc2x4i32_8i16:
; AVX512F: # %bb.0: # %entry
; AVX512F-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512F-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512F-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: trunc2x4i32_8i16:
; AVX512VL: # %bb.0: # %entry
; AVX512VL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512VL-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512VL-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512VL-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: trunc2x4i32_8i16:
; AVX512BW: # %bb.0: # %entry
; AVX512BW-NEXT: vmovdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512BW-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512BW-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: trunc2x4i32_8i16:
; AVX512BWVL: # %bb.0: # %entry
; AVX512BWVL-NEXT: vmovdqa {{.*#+}} xmm2 = [0,2,4,6,8,10,12,14]
; AVX512BWVL-NEXT: vpermt2w %xmm1, %xmm2, %xmm0
; AVX512BWVL-NEXT: retq
entry:
  %0 = trunc <4 x i32> %a to <4 x i16>
  %1 = trunc <4 x i32> %b to <4 x i16>
  %2 = shufflevector <4 x i16> %0, <4 x i16> %1, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  ret <8 x i16> %2
}

; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
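; NOTE: the interesting part here is the i64 bitcast of the truncated
; vector: it stays in-register (pack into the low qword, then movq to
; %rax) rather than going through a stack slot.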
define i64 @trunc4i32_i64(<4 x i32> %inval) {
; SSE2-LABEL: trunc4i32_i64:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc4i32_i64:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc4i32_i64:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE41-NEXT: movq %xmm0, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc4i32_i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc4i32_i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: retq
entry:
  %0 = trunc <4 x i32> %inval to <4 x i16>
  %1 = bitcast <4 x i16> %0 to i64
  ret i64 %1
}

define <16 x i8> @trunc2x8i16_16i8(<8 x i16> %a, <8 x i16> %b) {
; SSE2-LABEL: trunc2x8i16_16i8:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
; SSE2-NEXT: pand %xmm2, %xmm1
; SSE2-NEXT: pand %xmm2, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc2x8i16_16i8:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSSE3-NEXT: pshufb %xmm2, %xmm1
; SSSE3-NEXT: pshufb %xmm2, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc2x8i16_16i8:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE41-NEXT: pshufb %xmm2, %xmm1
; SSE41-NEXT: pshufb %xmm2, %xmm0
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc2x8i16_16i8:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc2x8i16_16i8:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vmovdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; AVX512-NEXT: vpshufb %xmm2, %xmm1, %xmm1
; AVX512-NEXT: vpshufb %xmm2, %xmm0, %xmm0
; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX512-NEXT: retq
entry:
  %0 = trunc <8 x i16> %a to <8 x i8>
  %1 = trunc <8 x i16> %b to <8 x i8>
  %2 = shufflevector <8 x i8> %0, <8 x i8> %1, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
  ret <16 x i8> %2
}

; PR15524 http://llvm.org/bugs/show_bug.cgi?id=15524
define i64 @trunc8i16_i64(<8 x i16> %inval) {
; SSE2-LABEL: trunc8i16_i64:
; SSE2: # %bb.0: # %entry
; SSE2-NEXT: pand {{.*}}(%rip), %xmm0
; SSE2-NEXT: packuswb %xmm0, %xmm0
; SSE2-NEXT: movq %xmm0, %rax
; SSE2-NEXT: retq
;
; SSSE3-LABEL: trunc8i16_i64:
; SSSE3: # %bb.0: # %entry
; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: movq %xmm0, %rax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: trunc8i16_i64:
; SSE41: # %bb.0: # %entry
; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE41-NEXT: movq %xmm0, %rax
; SSE41-NEXT: retq
;
; AVX-LABEL: trunc8i16_i64:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX-NEXT: vmovq %xmm0, %rax
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc8i16_i64:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vmovq %xmm0, %rax
; AVX512-NEXT: retq
entry:
  %0 = trunc <8 x i16> %inval to <8 x i8>
  %1 = bitcast <8 x i8> %0 to i64
  ret i64 %1
}
|
2015-03-26 06:30:31 +08:00
|
|
|
|
2015-08-18 16:37:09 +08:00
|
|
|
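; Truncating a constant-zero <16 x i64> vector should fold away completely,
; leaving only a zeroed return register on every target.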
define <16 x i8> @trunc16i64_16i8_const() {
; SSE-LABEL: trunc16i64_16i8_const:
; SSE: # %bb.0: # %entry
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: trunc16i64_16i8_const:
; AVX: # %bb.0: # %entry
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
;
; AVX512-LABEL: trunc16i64_16i8_const:
; AVX512: # %bb.0: # %entry
; AVX512-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX512-NEXT: retq

entry:
  %0 = trunc <16 x i64> zeroinitializer to <16 x i8>
  %1 = shufflevector <16 x i8> %0, <16 x i8> %0, <16 x i32> <i32 28, i32 30, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 undef, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26>
  ret <16 x i8> %1
}

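; PR32160: truncate <8 x i32> to <8 x i16>, then splat element 2 of the
; result across all lanes.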
define <8 x i16> @PR32160(<8 x i32> %x) {
; SSE-LABEL: PR32160:
; SSE: # %bb.0:
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2]
; SSE-NEXT: retq
;
; AVX1-LABEL: PR32160:
; AVX1: # %bb.0:
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,8,9,8,9,8,9,8,9]
; AVX1-NEXT: vzeroupper
; AVX1-NEXT: retq
;
; AVX2-SLOW-LABEL: PR32160:
; AVX2-SLOW: # %bb.0:
; AVX2-SLOW-NEXT: vpunpckhwd {{.*#+}} xmm0 = xmm0[4,4,5,5,6,6,7,7]
; AVX2-SLOW-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX2-SLOW-NEXT: vzeroupper
; AVX2-SLOW-NEXT: retq
;
; AVX2-FAST-LABEL: PR32160:
; AVX2-FAST: # %bb.0:
; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[8,9,8,9,8,9,8,9,8,9,8,9,8,9,8,9]
; AVX2-FAST-NEXT: vzeroupper
; AVX2-FAST-NEXT: retq
;
; AVX512F-LABEL: PR32160:
; AVX512F: # %bb.0:
; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[2,2,2,3,4,5,6,7]
; AVX512F-NEXT: vpbroadcastd %xmm0, %xmm0
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: PR32160:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512VL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: PR32160:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT: vpmovdw %zmm0, %ymm0
; AVX512BW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: PR32160:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpmovdw %ymm0, %xmm0
; AVX512BWVL-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[4,5,4,5,4,5,4,5,4,5,4,5,4,5,4,5]
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
  %shuf = trunc <8 x i32> %x to <8 x i16>
  %trunc = shufflevector <8 x i16> %shuf, <8 x i16> undef, <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
  ret <8 x i16> %trunc
}

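; PR34773: load two <16 x i16> vectors, shift each lane right by 8, truncate
; the high bytes to <16 x i8>, and store the results.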
define void @PR34773(i16* %a0, i8* %a1) {
; SSE-LABEL: PR34773:
; SSE: # %bb.0:
; SSE-NEXT: movdqu (%rdi), %xmm0
; SSE-NEXT: movdqu 16(%rdi), %xmm1
; SSE-NEXT: movdqu 32(%rdi), %xmm2
; SSE-NEXT: movdqu 48(%rdi), %xmm3
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: psrlw $8, %xmm0
; SSE-NEXT: packuswb %xmm1, %xmm0
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: psrlw $8, %xmm2
; SSE-NEXT: packuswb %xmm3, %xmm2
; SSE-NEXT: movdqu %xmm0, (%rsi)
; SSE-NEXT: movdqu %xmm2, 16(%rsi)
; SSE-NEXT: retq
;
; AVX1-LABEL: PR34773:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqu (%rdi), %xmm0
; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
; AVX1-NEXT: vmovdqu 32(%rdi), %xmm2
; AVX1-NEXT: vmovdqu 48(%rdi), %xmm3
; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm0, %xmm0
; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsrlw $8, %xmm3, %xmm1
; AVX1-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX1-NEXT: vpackuswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vmovdqu %xmm0, (%rsi)
; AVX1-NEXT: vmovdqu %xmm1, 16(%rsi)
; AVX1-NEXT: retq
;
; AVX2-LABEL: PR34773:
; AVX2: # %bb.0:
; AVX2-NEXT: vmovdqu (%rdi), %ymm0
; AVX2-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX2-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX2-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpackuswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vmovdqu %xmm0, (%rsi)
; AVX2-NEXT: vmovdqu %xmm1, 16(%rsi)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512F-LABEL: PR34773:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vmovdqu (%rdi), %ymm0
; AVX512F-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512F-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512F-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, (%rsi)
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512F-NEXT: vpmovdb %zmm0, 16(%rsi)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: PR34773:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vmovdqu (%rdi), %ymm0
; AVX512VL-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512VL-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512VL-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
; AVX512VL-NEXT: vpmovdb %zmm0, (%rsi)
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
; AVX512VL-NEXT: vpmovdb %zmm0, 16(%rsi)
; AVX512VL-NEXT: vzeroupper
; AVX512VL-NEXT: retq
;
; AVX512BW-LABEL: PR34773:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vmovdqu (%rdi), %ymm0
; AVX512BW-NEXT: vmovdqu 32(%rdi), %ymm1
; AVX512BW-NEXT: vpsrlw $8, %ymm0, %ymm0
; AVX512BW-NEXT: vpsrlw $8, %ymm1, %ymm1
; AVX512BW-NEXT: vpmovwb %zmm0, %ymm0
; AVX512BW-NEXT: vpmovwb %zmm1, %ymm1
; AVX512BW-NEXT: vmovdqu %xmm0, (%rsi)
; AVX512BW-NEXT: vmovdqu %xmm1, 16(%rsi)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
;
; AVX512BWVL-LABEL: PR34773:
; AVX512BWVL: # %bb.0:
; AVX512BWVL-NEXT: vpsrlw $8, (%rdi), %ymm0
; AVX512BWVL-NEXT: vpsrlw $8, 32(%rdi), %ymm1
; AVX512BWVL-NEXT: vpmovwb %ymm0, (%rsi)
; AVX512BWVL-NEXT: vpmovwb %ymm1, 16(%rsi)
; AVX512BWVL-NEXT: vzeroupper
; AVX512BWVL-NEXT: retq
  %1 = getelementptr i16, i16* %a0, i64 16
  %2 = getelementptr i8, i8* %a1, i64 16
  %3 = bitcast i16* %a0 to <16 x i16>*
  %4 = bitcast i16* %1 to <16 x i16>*
  %5 = bitcast i8* %a1 to <16 x i8>*
  %6 = bitcast i8* %2 to <16 x i8>*
  %7 = load <16 x i16>, <16 x i16>* %3, align 2
  %8 = load <16 x i16>, <16 x i16>* %4, align 2
  %9 = lshr <16 x i16> %7, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %10 = lshr <16 x i16> %8, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %11 = trunc <16 x i16> %9 to <16 x i8>
  %12 = trunc <16 x i16> %10 to <16 x i8>
  store <16 x i8> %11, <16 x i8>* %5, align 1
  store <16 x i8> %12, <16 x i8>* %6, align 1
  ret void
}