; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefix=SSE --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+ssse3 | FileCheck %s --check-prefix=SSE --check-prefix=SSSE3
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
;
; Just one 32-bit run to make sure we do reasonable things there.
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=X32-SSE41
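;
; Sign extension of vector values already held in registers (sext_*).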
define <8 x i16> @sext_16i8_to_8i16(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_8i16:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    psraw $8, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_16i8_to_8i16:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    psraw $8, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_16i8_to_8i16:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: sext_16i8_to_8i16:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: sext_16i8_to_8i16:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxbw %xmm0, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %C = sext <8 x i8> %B to <8 x i16>
  ret <8 x i16> %C
}

define <16 x i16> @sext_16i8_to_16i16(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_16i16:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSE2-NEXT:    psraw $8, %xmm2
; SSE2-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSE2-NEXT:    psraw $8, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_16i8_to_16i16:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
; SSSE3-NEXT:    psraw $8, %xmm2
; SSSE3-NEXT:    punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
; SSSE3-NEXT:    psraw $8, %xmm1
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_16i8_to_16i16:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxbw %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: sext_16i8_to_16i16:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxbw %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: sext_16i8_to_16i16:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovsxbw %xmm0, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: sext_16i8_to_16i16:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxbw %xmm0, %xmm2
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT:    pmovsxbw %xmm0, %xmm1
; X32-SSE41-NEXT:    movdqa %xmm2, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = sext <16 x i8> %A to <16 x i16>
  ret <16 x i16> %B
}

define <4 x i32> @sext_16i8_to_4i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_4i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_16i8_to_4i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    psrad $24, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_16i8_to_4i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: sext_16i8_to_4i32:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: sext_16i8_to_4i32:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxbd %xmm0, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <16 x i8> %A, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %C = sext <4 x i8> %B to <4 x i32>
  ret <4 x i32> %C
}

define <8 x i32> @sext_16i8_to_8i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_8i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    psrad $24, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT:    psrad $24, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_16i8_to_8i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT:    psrad $24, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT:    psrad $24, %xmm1
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_16i8_to_8i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT:    pmovsxbd %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: sext_16i8_to_8i32:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: sext_16i8_to_8i32:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero,xmm0[4],zero,zero,zero,xmm0[5],zero,zero,zero,xmm0[6],zero,zero,zero,xmm0[7],zero,zero,zero
; AVX2-NEXT:    vpslld $24, %ymm0, %ymm0
; AVX2-NEXT:    vpsrad $24, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: sext_16i8_to_8i32:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxbd %xmm0, %xmm2
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X32-SSE41-NEXT:    pmovsxbd %xmm0, %xmm1
; X32-SSE41-NEXT:    movdqa %xmm2, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <16 x i8> %A, <16 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  %C = sext <8 x i8> %B to <8 x i32>
  ret <8 x i32> %C
}

define <2 x i64> @sext_16i8_to_2i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_2i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_16i8_to_2i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    psrad $24, %xmm0
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_16i8_to_2i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: sext_16i8_to_2i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxbq %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: sext_16i8_to_2i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxbq %xmm0, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
  %C = sext <2 x i8> %B to <2 x i64>
  ret <2 x i64> %C
}

define <4 x i64> @sext_16i8_to_4i64(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_16i8_to_4i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE2-NEXT:    movdqa %xmm2, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    psrad $24, %xmm2
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    psrld $16, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrad $31, %xmm0
; SSE2-NEXT:    psrad $24, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_16i8_to_4i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSSE3-NEXT:    movdqa %xmm2, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    psrad $24, %xmm2
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT:    psrld $16, %xmm0
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT:    movdqa %xmm1, %xmm0
; SSSE3-NEXT:    psrad $31, %xmm0
; SSSE3-NEXT:    psrad $24, %xmm1
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_16i8_to_4i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm2
; SSE41-NEXT:    psrld $16, %xmm0
; SSE41-NEXT:    pmovsxbq %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: sext_16i8_to_4i64:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxbq %xmm0, %xmm1
; AVX1-NEXT:    vpsrld $16, %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxbq %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: sext_16i8_to_4i64:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovzxbd {{.*#+}} xmm0 = xmm0[0],zero,zero,zero,xmm0[1],zero,zero,zero,xmm0[2],zero,zero,zero,xmm0[3],zero,zero,zero
; AVX2-NEXT:    vpslld $24, %xmm0, %xmm0
; AVX2-NEXT:    vpsrad $24, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: sext_16i8_to_4i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxbq %xmm0, %xmm2
; X32-SSE41-NEXT:    psrld $16, %xmm0
; X32-SSE41-NEXT:    pmovsxbq %xmm0, %xmm1
; X32-SSE41-NEXT:    movdqa %xmm2, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <16 x i8> %A, <16 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %C = sext <4 x i8> %B to <4 x i64>
  ret <4 x i64> %C
}

define <4 x i32> @sext_8i16_to_4i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_4i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_8i16_to_4i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    psrad $16, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_8i16_to_4i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: sext_8i16_to_4i32:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: sext_8i16_to_4i32:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %C = sext <4 x i16> %B to <4 x i32>
  ret <4 x i32> %C
}

define <8 x i32> @sext_8i16_to_8i32(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_8i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT:    psrad $16, %xmm2
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_8i16_to_8i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT:    psrad $16, %xmm2
; SSSE3-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSSE3-NEXT:    psrad $16, %xmm1
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_8i16_to_8i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: sext_8i16_to_8i32:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxwd %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: sext_8i16_to_8i32:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: sext_8i16_to_8i32:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxwd %xmm0, %xmm2
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT:    pmovsxwd %xmm0, %xmm1
; X32-SSE41-NEXT:    movdqa %xmm2, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = sext <8 x i16> %A to <8 x i32>
  ret <8 x i32> %B
}

define <2 x i64> @sext_8i16_to_2i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_2i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_8i16_to_2i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    psrad $16, %xmm0
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_8i16_to_2i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxwq %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: sext_8i16_to_2i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxwq %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: sext_8i16_to_2i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxwq %xmm0, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <8 x i16> %A, <8 x i16> undef, <2 x i32> <i32 0, i32 1>
  %C = sext <2 x i16> %B to <2 x i64>
  ret <2 x i64> %C
}

define <4 x i64> @sext_8i16_to_4i64(<8 x i16> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_8i16_to_4i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT:    movdqa %xmm2, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    psrad $16, %xmm2
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    psrad $31, %xmm0
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_8i16_to_4i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSSE3-NEXT:    movdqa %xmm2, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    psrad $16, %xmm2
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSSE3-NEXT:    movdqa %xmm1, %xmm0
; SSSE3-NEXT:    psrad $31, %xmm0
; SSSE3-NEXT:    psrad $16, %xmm1
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT:    movdqa %xmm2, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_8i16_to_4i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxwq %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; SSE41-NEXT:    pmovsxwq %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: sext_8i16_to_4i64:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; AVX1-NEXT:    vpmovsxwq %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: sext_8i16_to_4i64:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT:    vpslld $16, %xmm0, %xmm0
; AVX2-NEXT:    vpsrad $16, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: sext_8i16_to_4i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxwq %xmm0, %xmm2
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,2,3]
; X32-SSE41-NEXT:    pmovsxwq %xmm0, %xmm1
; X32-SSE41-NEXT:    movdqa %xmm2, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <8 x i16> %A, <8 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %C = sext <4 x i16> %B to <4 x i64>
  ret <4 x i64> %C
}

define <2 x i64> @sext_4i32_to_2i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_4i32_to_2i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_4i32_to_2i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_4i32_to_2i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxdq %xmm0, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: sext_4i32_to_2i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxdq %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: sext_4i32_to_2i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxdq %xmm0, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = shufflevector <4 x i32> %A, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  %C = sext <2 x i32> %B to <2 x i64>
  ret <2 x i64> %C
}

define <4 x i64> @sext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_4i32_to_4i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    psrad $31, %xmm2
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: sext_4i32_to_4i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movdqa %xmm0, %xmm2
; SSSE3-NEXT:    psrad $31, %xmm2
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT:    movdqa %xmm1, %xmm2
; SSSE3-NEXT:    psrad $31, %xmm2
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: sext_4i32_to_4i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxdq %xmm0, %xmm2
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxdq %xmm0, %xmm1
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX1-LABEL: sext_4i32_to_4i64:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: sext_4i32_to_4i64:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: sext_4i32_to_4i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    pmovsxdq %xmm0, %xmm2
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT:    pmovsxdq %xmm0, %xmm1
; X32-SSE41-NEXT:    movdqa %xmm2, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %B = sext <4 x i32> %A to <4 x i64>
  ret <4 x i64> %B
}

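;
; Sign-extending loads (load_sext_*): the source vector comes from memory
; rather than from a register.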
define <2 x i64> @load_sext_2i1_to_2i64(<2 x i1> *%ptr) {
; SSE-LABEL: load_sext_2i1_to_2i64:
; SSE:       # BB#0: # %entry
; SSE-NEXT:    movzbl (%rdi), %eax
; SSE-NEXT:    movq %rax, %rcx
; SSE-NEXT:    shlq $62, %rcx
; SSE-NEXT:    sarq $63, %rcx
; SSE-NEXT:    movd %rcx, %xmm1
; SSE-NEXT:    shlq $63, %rax
; SSE-NEXT:    sarq $63, %rax
; SSE-NEXT:    movd %rax, %xmm0
; SSE-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: load_sext_2i1_to_2i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    movzbl (%rdi), %eax
; AVX-NEXT:    movq %rax, %rcx
; AVX-NEXT:    shlq $62, %rcx
; AVX-NEXT:    sarq $63, %rcx
; AVX-NEXT:    vmovq %rcx, %xmm0
; AVX-NEXT:    shlq $63, %rax
; AVX-NEXT:    sarq $63, %rax
; AVX-NEXT:    vmovq %rax, %xmm1
; AVX-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: load_sext_2i1_to_2i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT:    movzbl (%eax), %eax
; X32-SSE41-NEXT:    movl %eax, %ecx
; X32-SSE41-NEXT:    shll $31, %ecx
; X32-SSE41-NEXT:    sarl $31, %ecx
; X32-SSE41-NEXT:    movd %ecx, %xmm0
; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
; X32-SSE41-NEXT:    shll $30, %eax
; X32-SSE41-NEXT:    sarl $31, %eax
; X32-SSE41-NEXT:    pinsrd $2, %eax, %xmm0
; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %X = load <2 x i1>, <2 x i1>* %ptr
  %Y = sext <2 x i1> %X to <2 x i64>
  ret <2 x i64> %Y
}

define <2 x i64> @load_sext_2i8_to_2i64(<2 x i8> *%ptr) {
; SSE2-LABEL: load_sext_2i8_to_2i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movzwl (%rdi), %eax
; SSE2-NEXT:    movd %eax, %xmm0
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_sext_2i8_to_2i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movzwl (%rdi), %eax
; SSSE3-NEXT:    movd %eax, %xmm0
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    movdqa %xmm0, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    psrad $24, %xmm0
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_sext_2i8_to_2i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbq (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: load_sext_2i8_to_2i64:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxbq (%rdi), %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: load_sext_2i8_to_2i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT:    pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %X = load <2 x i8>, <2 x i8>* %ptr
  %Y = sext <2 x i8> %X to <2 x i64>
  ret <2 x i64> %Y
}

define <4 x i32> @load_sext_4i1_to_4i32(<4 x i1> *%ptr) {
; SSE2-LABEL: load_sext_4i1_to_4i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movzbl (%rdi), %eax
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $60, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm0
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $62, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $61, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm2
; SSE2-NEXT:    shlq $63, %rax
; SSE2-NEXT:    sarq $63, %rax
; SSE2-NEXT:    movd %eax, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_sext_4i1_to_4i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movzbl (%rdi), %eax
; SSSE3-NEXT:    movq %rax, %rcx
; SSSE3-NEXT:    shlq $60, %rcx
; SSSE3-NEXT:    sarq $63, %rcx
; SSSE3-NEXT:    movd %ecx, %xmm0
; SSSE3-NEXT:    movq %rax, %rcx
; SSSE3-NEXT:    shlq $62, %rcx
; SSSE3-NEXT:    sarq $63, %rcx
; SSSE3-NEXT:    movd %ecx, %xmm1
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT:    movq %rax, %rcx
; SSSE3-NEXT:    shlq $61, %rcx
; SSSE3-NEXT:    sarq $63, %rcx
; SSSE3-NEXT:    movd %ecx, %xmm2
; SSSE3-NEXT:    shlq $63, %rax
; SSSE3-NEXT:    sarq $63, %rax
; SSSE3-NEXT:    movd %eax, %xmm0
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_sext_4i1_to_4i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    movzbl (%rdi), %eax
; SSE41-NEXT:    movq %rax, %rcx
; SSE41-NEXT:    shlq $62, %rcx
; SSE41-NEXT:    sarq $63, %rcx
; SSE41-NEXT:    movq %rax, %rdx
; SSE41-NEXT:    shlq $63, %rdx
; SSE41-NEXT:    sarq $63, %rdx
; SSE41-NEXT:    movd %edx, %xmm0
; SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
; SSE41-NEXT:    movq %rax, %rcx
; SSE41-NEXT:    shlq $61, %rcx
; SSE41-NEXT:    sarq $63, %rcx
; SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
; SSE41-NEXT:    shlq $60, %rax
; SSE41-NEXT:    sarq $63, %rax
; SSE41-NEXT:    pinsrd $3, %eax, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: load_sext_4i1_to_4i32:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    movzbl (%rdi), %eax
; AVX-NEXT:    movq %rax, %rcx
; AVX-NEXT:    shlq $62, %rcx
; AVX-NEXT:    sarq $63, %rcx
; AVX-NEXT:    movq %rax, %rdx
; AVX-NEXT:    shlq $63, %rdx
; AVX-NEXT:    sarq $63, %rdx
; AVX-NEXT:    vmovd %edx, %xmm0
; AVX-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX-NEXT:    movq %rax, %rcx
; AVX-NEXT:    shlq $61, %rcx
; AVX-NEXT:    sarq $63, %rcx
; AVX-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
; AVX-NEXT:    shlq $60, %rax
; AVX-NEXT:    sarq $63, %rax
; AVX-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i32:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT:    movl (%eax), %eax
; X32-SSE41-NEXT:    movl %eax, %ecx
; X32-SSE41-NEXT:    shll $30, %ecx
; X32-SSE41-NEXT:    sarl $31, %ecx
; X32-SSE41-NEXT:    movl %eax, %edx
; X32-SSE41-NEXT:    shll $31, %edx
; X32-SSE41-NEXT:    sarl $31, %edx
; X32-SSE41-NEXT:    movd %edx, %xmm0
; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm0
; X32-SSE41-NEXT:    movl %eax, %ecx
; X32-SSE41-NEXT:    shll $29, %ecx
; X32-SSE41-NEXT:    sarl $31, %ecx
; X32-SSE41-NEXT:    pinsrd $2, %ecx, %xmm0
; X32-SSE41-NEXT:    shll $28, %eax
; X32-SSE41-NEXT:    sarl $31, %eax
; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %X = load <4 x i1>, <4 x i1>* %ptr
  %Y = sext <4 x i1> %X to <4 x i32>
  ret <4 x i32> %Y
}

define <4 x i32> @load_sext_4i8_to_4i32(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i32:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $24, %xmm0
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i32:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT:    psrad $24, %xmm0
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_sext_4i8_to_4i32:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbd (%rdi), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: load_sext_4i8_to_4i32:
; AVX:       # BB#0: # %entry
; AVX-NEXT:    vpmovsxbd (%rdi), %xmm0
; AVX-NEXT:    retq
;
; X32-SSE41-LABEL: load_sext_4i8_to_4i32:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT:    pmovsxbd (%eax), %xmm0
; X32-SSE41-NEXT:    retl
entry:
  %X = load <4 x i8>, <4 x i8>* %ptr
  %Y = sext <4 x i8> %X to <4 x i32>
  ret <4 x i32> %Y
}

define <4 x i64> @load_sext_4i1_to_4i64(<4 x i1> *%ptr) {
; SSE2-LABEL: load_sext_4i1_to_4i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movzbl (%rdi), %eax
; SSE2-NEXT:    movl %eax, %ecx
; SSE2-NEXT:    shrl $3, %ecx
; SSE2-NEXT:    andl $1, %ecx
; SSE2-NEXT:    movd %ecx, %xmm0
; SSE2-NEXT:    movl %eax, %ecx
; SSE2-NEXT:    shrl %ecx
; SSE2-NEXT:    andl $1, %ecx
; SSE2-NEXT:    movd %ecx, %xmm1
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movl %eax, %ecx
; SSE2-NEXT:    andl $1, %ecx
; SSE2-NEXT:    movd %ecx, %xmm2
; SSE2-NEXT:    shrl $2, %eax
; SSE2-NEXT:    andl $1, %eax
; SSE2-NEXT:    movd %eax, %xmm0
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
; SSE2-NEXT:    psllq $63, %xmm0
; SSE2-NEXT:    psrad $31, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
; SSE2-NEXT:    psllq $63, %xmm1
; SSE2-NEXT:    psrad $31, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_sext_4i1_to_4i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movzbl (%rdi), %eax
; SSSE3-NEXT:    movl %eax, %ecx
; SSSE3-NEXT:    shrl $3, %ecx
; SSSE3-NEXT:    andl $1, %ecx
; SSSE3-NEXT:    movd %ecx, %xmm0
; SSSE3-NEXT:    movl %eax, %ecx
; SSSE3-NEXT:    shrl %ecx
; SSSE3-NEXT:    andl $1, %ecx
; SSSE3-NEXT:    movd %ecx, %xmm1
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSSE3-NEXT:    movl %eax, %ecx
; SSSE3-NEXT:    andl $1, %ecx
; SSSE3-NEXT:    movd %ecx, %xmm2
; SSSE3-NEXT:    shrl $2, %eax
; SSSE3-NEXT:    andl $1, %eax
; SSSE3-NEXT:    movd %eax, %xmm0
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1]
; SSSE3-NEXT:    punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
; SSSE3-NEXT:    psllq $63, %xmm0
; SSSE3-NEXT:    psrad $31, %xmm0
; SSSE3-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
; SSSE3-NEXT:    psllq $63, %xmm1
; SSSE3-NEXT:    psrad $31, %xmm1
; SSSE3-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_sext_4i1_to_4i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    movzbl (%rdi), %eax
; SSE41-NEXT:    movl %eax, %ecx
; SSE41-NEXT:    shrl %ecx
; SSE41-NEXT:    andl $1, %ecx
; SSE41-NEXT:    movl %eax, %edx
; SSE41-NEXT:    andl $1, %edx
; SSE41-NEXT:    movd %edx, %xmm1
; SSE41-NEXT:    pinsrd $1, %ecx, %xmm1
; SSE41-NEXT:    movl %eax, %ecx
; SSE41-NEXT:    shrl $2, %ecx
; SSE41-NEXT:    andl $1, %ecx
; SSE41-NEXT:    pinsrd $2, %ecx, %xmm1
; SSE41-NEXT:    shrl $3, %eax
; SSE41-NEXT:    andl $1, %eax
; SSE41-NEXT:    pinsrd $3, %eax, %xmm1
; SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; SSE41-NEXT:    psllq $63, %xmm0
; SSE41-NEXT:    psrad $31, %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; SSE41-NEXT:    psllq $63, %xmm1
; SSE41-NEXT:    psrad $31, %xmm1
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE41-NEXT:    retq
;
; AVX1-LABEL: load_sext_4i1_to_4i64:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    movzbl (%rdi), %eax
; AVX1-NEXT:    movq %rax, %rcx
; AVX1-NEXT:    shlq $62, %rcx
; AVX1-NEXT:    sarq $63, %rcx
; AVX1-NEXT:    movq %rax, %rdx
; AVX1-NEXT:    shlq $63, %rdx
; AVX1-NEXT:    sarq $63, %rdx
; AVX1-NEXT:    vmovd %edx, %xmm0
; AVX1-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX1-NEXT:    movq %rax, %rcx
; AVX1-NEXT:    shlq $61, %rcx
; AVX1-NEXT:    sarq $63, %rcx
; AVX1-NEXT:    vpinsrd $2, %ecx, %xmm0, %xmm0
; AVX1-NEXT:    shlq $60, %rax
; AVX1-NEXT:    sarq $63, %rax
; AVX1-NEXT:    vpinsrd $3, %eax, %xmm0, %xmm0
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_sext_4i1_to_4i64:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    movzbl (%rdi), %eax
; AVX2-NEXT:    movq %rax, %rcx
; AVX2-NEXT:    shlq $60, %rcx
; AVX2-NEXT:    sarq $63, %rcx
; AVX2-NEXT:    vmovq %rcx, %xmm0
; AVX2-NEXT:    movq %rax, %rcx
; AVX2-NEXT:    shlq $61, %rcx
; AVX2-NEXT:    sarq $63, %rcx
; AVX2-NEXT:    vmovq %rcx, %xmm1
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
; AVX2-NEXT:    movq %rax, %rcx
; AVX2-NEXT:    shlq $62, %rcx
; AVX2-NEXT:    sarq $63, %rcx
; AVX2-NEXT:    vmovq %rcx, %xmm1
; AVX2-NEXT:    shlq $63, %rax
; AVX2-NEXT:    sarq $63, %rax
; AVX2-NEXT:    vmovq %rax, %xmm2
; AVX2-NEXT:    vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT:    vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: load_sext_4i1_to_4i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT:    movzbl (%eax), %eax
; X32-SSE41-NEXT:    movl %eax, %ecx
; X32-SSE41-NEXT:    shrl %ecx
; X32-SSE41-NEXT:    andl $1, %ecx
; X32-SSE41-NEXT:    movl %eax, %edx
; X32-SSE41-NEXT:    andl $1, %edx
; X32-SSE41-NEXT:    movd %edx, %xmm1
; X32-SSE41-NEXT:    pinsrd $1, %ecx, %xmm1
; X32-SSE41-NEXT:    movl %eax, %ecx
; X32-SSE41-NEXT:    shrl $2, %ecx
; X32-SSE41-NEXT:    andl $1, %ecx
; X32-SSE41-NEXT:    pinsrd $2, %ecx, %xmm1
; X32-SSE41-NEXT:    shrl $3, %eax
; X32-SSE41-NEXT:    andl $1, %eax
; X32-SSE41-NEXT:    pinsrd $3, %eax, %xmm1
; X32-SSE41-NEXT:    pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
; X32-SSE41-NEXT:    psllq $63, %xmm0
; X32-SSE41-NEXT:    psrad $31, %xmm0
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[1,1,3,3]
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
; X32-SSE41-NEXT:    psllq $63, %xmm1
; X32-SSE41-NEXT:    psrad $31, %xmm1
; X32-SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X32-SSE41-NEXT:    retl
entry:
  %X = load <4 x i1>, <4 x i1>* %ptr
  %Y = sext <4 x i1> %X to <4 x i64>
  ret <4 x i64> %Y
}

define <4 x i64> @load_sext_4i8_to_4i64(<4 x i8> *%ptr) {
; SSE2-LABEL: load_sext_4i8_to_4i64:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movsbq 1(%rdi), %rax
; SSE2-NEXT:    movd %rax, %xmm1
; SSE2-NEXT:    movsbq (%rdi), %rax
; SSE2-NEXT:    movd %rax, %xmm0
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    movsbq 3(%rdi), %rax
; SSE2-NEXT:    movd %rax, %xmm2
; SSE2-NEXT:    movsbq 2(%rdi), %rax
; SSE2-NEXT:    movd %rax, %xmm1
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_sext_4i8_to_4i64:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movsbq 1(%rdi), %rax
; SSSE3-NEXT:    movd %rax, %xmm1
; SSSE3-NEXT:    movsbq (%rdi), %rax
; SSSE3-NEXT:    movd %rax, %xmm0
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT:    movsbq 3(%rdi), %rax
; SSSE3-NEXT:    movd %rax, %xmm2
; SSSE3-NEXT:    movsbq 2(%rdi), %rax
; SSSE3-NEXT:    movd %rax, %xmm1
; SSSE3-NEXT:    punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT:    retq
;
; SSE41-LABEL: load_sext_4i8_to_4i64:
; SSE41:       # BB#0: # %entry
; SSE41-NEXT:    pmovsxbq (%rdi), %xmm0
; SSE41-NEXT:    pmovsxbq 2(%rdi), %xmm1
; SSE41-NEXT:    retq
;
; AVX1-LABEL: load_sext_4i8_to_4i64:
; AVX1:       # BB#0: # %entry
; AVX1-NEXT:    vpmovsxbd (%rdi), %xmm0
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT:    vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT:    retq
;
; AVX2-LABEL: load_sext_4i8_to_4i64:
; AVX2:       # BB#0: # %entry
; AVX2-NEXT:    vpmovsxbq (%rdi), %ymm0
; AVX2-NEXT:    retq
;
; X32-SSE41-LABEL: load_sext_4i8_to_4i64:
; X32-SSE41:       # BB#0: # %entry
; X32-SSE41-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT:    pmovsxbq (%eax), %xmm0
; X32-SSE41-NEXT:    pmovsxbq 2(%eax), %xmm1
; X32-SSE41-NEXT:    retl
entry:
  %X = load <4 x i8>, <4 x i8>* %ptr
  %Y = sext <4 x i8> %X to <4 x i64>
  ret <4 x i64> %Y
}

define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
; SSE2-LABEL: load_sext_8i1_to_8i16:
; SSE2:       # BB#0: # %entry
; SSE2-NEXT:    movsbq (%rdi), %rax
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shrq $7, %rcx
; SSE2-NEXT:    movd %ecx, %xmm0
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $60, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $58, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm0
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $62, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $57, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm0
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $61, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm2
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
; SSE2-NEXT:    movq %rax, %rcx
; SSE2-NEXT:    shlq $59, %rcx
; SSE2-NEXT:    sarq $63, %rcx
; SSE2-NEXT:    movd %ecx, %xmm3
; SSE2-NEXT:    shlq $63, %rax
; SSE2-NEXT:    sarq $63, %rax
; SSE2-NEXT:    movd %eax, %xmm0
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT:    retq
;
; SSSE3-LABEL: load_sext_8i1_to_8i16:
; SSSE3:       # BB#0: # %entry
; SSSE3-NEXT:    movsbq (%rdi), %rax
; SSSE3-NEXT:    movq %rax, %rcx
; SSSE3-NEXT:    shrq $7, %rcx
; SSSE3-NEXT:    movd %ecx, %xmm0
; SSSE3-NEXT:    movq %rax, %rcx
; SSSE3-NEXT:    shlq $60, %rcx
; SSSE3-NEXT:    sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: movq %rax, %rcx
|
|
|
|
; SSSE3-NEXT: shlq $58, %rcx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movq %rax, %rcx
|
|
|
|
; SSSE3-NEXT: shlq $62, %rcx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
|
|
|
|
; SSSE3-NEXT: movq %rax, %rcx
|
|
|
|
; SSSE3-NEXT: shlq $57, %rcx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movq %rax, %rcx
|
|
|
|
; SSSE3-NEXT: shlq $61, %rcx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: movq %rax, %rcx
|
|
|
|
; SSSE3-NEXT: shlq $59, %rcx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSSE3-NEXT: shlq $63, %rax
|
|
|
|
; SSSE3-NEXT: sarq $63, %rax
|
|
|
|
; SSSE3-NEXT: movd %eax, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
|
|
|
|
; SSSE3-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: load_sext_8i1_to_8i16:
|
|
|
|
; SSE41: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE41-NEXT: movsbq (%rdi), %rax
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $62, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: movq %rax, %rdx
|
|
|
|
; SSE41-NEXT: shlq $63, %rdx
|
|
|
|
; SSE41-NEXT: sarq $63, %rdx
|
|
|
|
; SSE41-NEXT: movd %edx, %xmm0
|
|
|
|
; SSE41-NEXT: pinsrw $1, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $61, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrw $2, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $60, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrw $3, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $59, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrw $4, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $58, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrw $5, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $57, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrw $6, %ecx, %xmm0
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE41-NEXT: shrq $7, %rax
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE41-NEXT: pinsrw $7, %eax, %xmm0
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX-LABEL: load_sext_8i1_to_8i16:
|
|
|
|
; AVX: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; AVX-NEXT: movsbq (%rdi), %rax
|
2015-09-12 23:36:41 +08:00
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $62, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: movq %rax, %rdx
|
|
|
|
; AVX-NEXT: shlq $63, %rdx
|
|
|
|
; AVX-NEXT: sarq $63, %rdx
|
|
|
|
; AVX-NEXT: vmovd %edx, %xmm0
|
|
|
|
; AVX-NEXT: vpinsrw $1, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $61, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrw $2, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $60, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrw $3, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $59, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrw $4, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $58, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrw $5, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $57, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrw $6, %ecx, %xmm0, %xmm0
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; AVX-NEXT: shrq $7, %rax
|
2015-09-12 23:36:41 +08:00
|
|
|
; AVX-NEXT: vpinsrw $7, %eax, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: retq
|
|
|
|
;
|
|
|
|
; X32-SSE41-LABEL: load_sext_8i1_to_8i16:
|
|
|
|
; X32-SSE41: # BB#0: # %entry
|
|
|
|
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; X32-SSE41-NEXT: movsbl (%eax), %eax
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $30, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %edx
|
|
|
|
; X32-SSE41-NEXT: shll $31, %edx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %edx
|
|
|
|
; X32-SSE41-NEXT: movd %edx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: pinsrw $1, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $29, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $2, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $28, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $3, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $27, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $4, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $26, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $5, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $25, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $6, %ecx, %xmm0
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; X32-SSE41-NEXT: shrl $7, %eax
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrw $7, %eax, %xmm0
|
|
|
|
; X32-SSE41-NEXT: retl
|
|
|
|
entry:
|
|
|
|
%X = load <8 x i1>, <8 x i1>* %ptr
|
|
|
|
%Y = sext <8 x i1> %X to <8 x i16>
|
|
|
|
ret <8 x i16> %Y
|
|
|
|
}
|
|
|
|
|
2015-07-25 22:07:20 +08:00
|
|
|
define <8 x i16> @load_sext_8i8_to_8i16(<8 x i8> *%ptr) {
|
|
|
|
; SSE2-LABEL: load_sext_8i8_to_8i16:
|
|
|
|
; SSE2: # BB#0: # %entry
|
|
|
|
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSE2-NEXT: psraw $8, %xmm0
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSSE3-LABEL: load_sext_8i8_to_8i16:
|
|
|
|
; SSSE3: # BB#0: # %entry
|
|
|
|
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSSE3-NEXT: psraw $8, %xmm0
|
|
|
|
; SSSE3-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: load_sext_8i8_to_8i16:
|
|
|
|
; SSE41: # BB#0: # %entry
|
|
|
|
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX-LABEL: load_sext_8i8_to_8i16:
|
|
|
|
; AVX: # BB#0: # %entry
|
|
|
|
; AVX-NEXT: vpmovsxbw (%rdi), %xmm0
|
|
|
|
; AVX-NEXT: retq
|
|
|
|
;
|
|
|
|
; X32-SSE41-LABEL: load_sext_8i8_to_8i16:
|
|
|
|
; X32-SSE41: # BB#0: # %entry
|
|
|
|
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
|
|
|
|
; X32-SSE41-NEXT: retl
|
|
|
|
entry:
|
|
|
|
%X = load <8 x i8>, <8 x i8>* %ptr
|
|
|
|
%Y = sext <8 x i8> %X to <8 x i16>
|
|
|
|
ret <8 x i16> %Y
|
|
|
|
}
|
|
|
|
|
2015-09-12 23:36:41 +08:00
|
|
|
define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
|
|
|
|
; SSE2-LABEL: load_sext_8i1_to_8i32:
|
|
|
|
; SSE2: # BB#0: # %entry
|
|
|
|
; SSE2-NEXT: movzbl (%rdi), %eax
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $6, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $2, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $4, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $5, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $3, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: shrl $7, %eax
|
|
|
|
; SSE2-NEXT: movzwl %ax, %eax
|
|
|
|
; SSE2-NEXT: movd %eax, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
|
|
|
; SSE2-NEXT: pslld $31, %xmm0
|
|
|
|
; SSE2-NEXT: psrad $31, %xmm0
|
2015-09-22 16:16:08 +08:00
|
|
|
; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE2-NEXT: pslld $31, %xmm1
|
|
|
|
; SSE2-NEXT: psrad $31, %xmm1
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSSE3-LABEL: load_sext_8i1_to_8i32:
|
|
|
|
; SSSE3: # BB#0: # %entry
|
|
|
|
; SSSE3-NEXT: movzbl (%rdi), %eax
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $6, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $2, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $4, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $5, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $3, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: shrl $7, %eax
|
|
|
|
; SSSE3-NEXT: movzwl %ax, %eax
|
|
|
|
; SSSE3-NEXT: movd %eax, %xmm3
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
|
|
|
|
; SSSE3-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
|
|
|
; SSSE3-NEXT: pslld $31, %xmm0
|
|
|
|
; SSSE3-NEXT: psrad $31, %xmm0
|
2015-09-22 16:16:08 +08:00
|
|
|
; SSSE3-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSSE3-NEXT: pslld $31, %xmm1
|
|
|
|
; SSSE3-NEXT: psrad $31, %xmm1
|
|
|
|
; SSSE3-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: load_sext_8i1_to_8i32:
|
|
|
|
; SSE41: # BB#0: # %entry
|
|
|
|
; SSE41-NEXT: movzbl (%rdi), %eax
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: movl %eax, %edx
|
|
|
|
; SSE41-NEXT: andl $1, %edx
|
|
|
|
; SSE41-NEXT: movd %edx, %xmm1
|
|
|
|
; SSE41-NEXT: pinsrw $1, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $2, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrw $2, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $3, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrw $3, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $4, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrw $4, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $5, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrw $5, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $6, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrw $6, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: shrl $7, %eax
|
|
|
|
; SSE41-NEXT: movzwl %ax, %eax
|
|
|
|
; SSE41-NEXT: pinsrw $7, %eax, %xmm1
|
|
|
|
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
|
|
|
|
; SSE41-NEXT: pslld $31, %xmm0
|
|
|
|
; SSE41-NEXT: psrad $31, %xmm0
|
2015-10-25 06:45:04 +08:00
|
|
|
; SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE41-NEXT: pslld $31, %xmm1
|
|
|
|
; SSE41-NEXT: psrad $31, %xmm1
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: load_sext_8i1_to_8i32:
|
|
|
|
; AVX1: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; AVX1-NEXT: movsbq (%rdi), %rax
|
2015-09-12 23:36:41 +08:00
|
|
|
; AVX1-NEXT: movq %rax, %rcx
|
|
|
|
; AVX1-NEXT: shlq $58, %rcx
|
|
|
|
; AVX1-NEXT: sarq $63, %rcx
|
|
|
|
; AVX1-NEXT: movq %rax, %rdx
|
|
|
|
; AVX1-NEXT: shlq $59, %rdx
|
|
|
|
; AVX1-NEXT: sarq $63, %rdx
|
|
|
|
; AVX1-NEXT: vmovd %edx, %xmm0
|
|
|
|
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: movq %rax, %rcx
|
|
|
|
; AVX1-NEXT: shlq $57, %rcx
|
|
|
|
; AVX1-NEXT: sarq $63, %rcx
|
|
|
|
; AVX1-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: movq %rax, %rcx
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; AVX1-NEXT: shrq $7, %rcx
|
2015-09-12 23:36:41 +08:00
|
|
|
; AVX1-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: movq %rax, %rcx
|
|
|
|
; AVX1-NEXT: shlq $62, %rcx
|
|
|
|
; AVX1-NEXT: sarq $63, %rcx
|
|
|
|
; AVX1-NEXT: movq %rax, %rdx
|
|
|
|
; AVX1-NEXT: shlq $63, %rdx
|
|
|
|
; AVX1-NEXT: sarq $63, %rdx
|
|
|
|
; AVX1-NEXT: vmovd %edx, %xmm1
|
|
|
|
; AVX1-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: movq %rax, %rcx
|
|
|
|
; AVX1-NEXT: shlq $61, %rcx
|
|
|
|
; AVX1-NEXT: sarq $63, %rcx
|
|
|
|
; AVX1-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: shlq $60, %rax
|
|
|
|
; AVX1-NEXT: sarq $63, %rax
|
|
|
|
; AVX1-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: load_sext_8i1_to_8i32:
|
|
|
|
; AVX2: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; AVX2-NEXT: movsbq (%rdi), %rax
|
2015-09-12 23:36:41 +08:00
|
|
|
; AVX2-NEXT: movq %rax, %rcx
|
|
|
|
; AVX2-NEXT: shlq $58, %rcx
|
|
|
|
; AVX2-NEXT: sarq $63, %rcx
|
|
|
|
; AVX2-NEXT: movq %rax, %rdx
|
|
|
|
; AVX2-NEXT: shlq $59, %rdx
|
|
|
|
; AVX2-NEXT: sarq $63, %rdx
|
|
|
|
; AVX2-NEXT: vmovd %edx, %xmm0
|
|
|
|
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: movq %rax, %rcx
|
|
|
|
; AVX2-NEXT: shlq $57, %rcx
|
|
|
|
; AVX2-NEXT: sarq $63, %rcx
|
|
|
|
; AVX2-NEXT: vpinsrd $2, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: movq %rax, %rcx
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; AVX2-NEXT: shrq $7, %rcx
|
2015-09-12 23:36:41 +08:00
|
|
|
; AVX2-NEXT: vpinsrd $3, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: movq %rax, %rcx
|
|
|
|
; AVX2-NEXT: shlq $62, %rcx
|
|
|
|
; AVX2-NEXT: sarq $63, %rcx
|
|
|
|
; AVX2-NEXT: movq %rax, %rdx
|
|
|
|
; AVX2-NEXT: shlq $63, %rdx
|
|
|
|
; AVX2-NEXT: sarq $63, %rdx
|
|
|
|
; AVX2-NEXT: vmovd %edx, %xmm1
|
|
|
|
; AVX2-NEXT: vpinsrd $1, %ecx, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: movq %rax, %rcx
|
|
|
|
; AVX2-NEXT: shlq $61, %rcx
|
|
|
|
; AVX2-NEXT: sarq $63, %rcx
|
|
|
|
; AVX2-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: shlq $60, %rax
|
|
|
|
; AVX2-NEXT: sarq $63, %rax
|
|
|
|
; AVX2-NEXT: vpinsrd $3, %eax, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; X32-SSE41-LABEL: load_sext_8i1_to_8i32:
|
|
|
|
; X32-SSE41: # BB#0: # %entry
|
|
|
|
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; X32-SSE41-NEXT: movzbl (%eax), %eax
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %edx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %edx
|
|
|
|
; X32-SSE41-NEXT: movd %edx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: pinsrw $1, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $2, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $2, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $3, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $3, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $4, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $4, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $5, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $5, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $6, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrw $6, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: shrl $7, %eax
|
|
|
|
; X32-SSE41-NEXT: pinsrw $7, %eax, %xmm1
|
|
|
|
; X32-SSE41-NEXT: pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
|
|
|
|
; X32-SSE41-NEXT: pslld $31, %xmm0
|
|
|
|
; X32-SSE41-NEXT: psrad $31, %xmm0
|
2015-10-25 06:45:04 +08:00
|
|
|
; X32-SSE41-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pslld $31, %xmm1
|
|
|
|
; X32-SSE41-NEXT: psrad $31, %xmm1
|
|
|
|
; X32-SSE41-NEXT: retl
|
|
|
|
entry:
|
|
|
|
%X = load <8 x i1>, <8 x i1>* %ptr
|
|
|
|
%Y = sext <8 x i1> %X to <8 x i32>
|
|
|
|
ret <8 x i32> %Y
|
|
|
|
}
|
|
|
|
|
2015-07-25 22:07:20 +08:00
|
|
|
define <8 x i32> @load_sext_8i8_to_8i32(<8 x i8> *%ptr) {
|
|
|
|
; SSE2-LABEL: load_sext_8i8_to_8i32:
|
2014-10-02 14:52:19 +08:00
|
|
|
; SSE2: # BB#0: # %entry
|
2015-07-13 01:40:49 +08:00
|
|
|
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
[x86] Enable the new vector shuffle lowering by default.
Update the entire regression test suite for the new shuffles. Remove
most of the old testing which was devoted to the old shuffle lowering
path and is no longer relevant really. Also remove a few other random
tests that only really exercised shuffles and only incidently or without
any interesting aspects to them.
Benchmarking that I have done shows a few small regressions with this on
LNT, zero measurable regressions on real, large applications, and for
several benchmarks where the loop vectorizer fires in the hot path it
shows 5% to 40% improvements for SSE2 and SSE3 code running on Sandy
Bridge machines. Running on AMD machines shows even more dramatic
improvements.
When using newer ISA vector extensions the gains are much more modest,
but the code is still better on the whole. There are a few regressions
being tracked (PR21137, PR21138, PR21139) but by and large this is
expected to be a win for x86 generated code performance.
It is also more correct than the code it replaces. I have fuzz tested
this extensively with ISA extensions up through AVX2 and found no
crashes or miscompiles (yet...). The old lowering had a few miscompiles
and crashers after a somewhat smaller amount of fuzz testing.
There is one significant area where the new code path lags behind and
that is in AVX-512 support. However, there was *extremely little*
support for that already and so this isn't a significant step backwards
and the new framework will probably make it easier to implement lowering
that uses the full power of AVX-512's table-based shuffle+blend (IMO).
Many thanks to Quentin, Andrea, Robert, and others for benchmarking
assistance. Thanks to Adam and others for help with AVX-512. Thanks to
Hal, Eric, and *many* others for answering my incessant questions about
how the backend actually works. =]
I will leave the old code path in the tree until the 3 PRs above are at
least resolved to folks' satisfaction. Then I will rip it (and 1000s of
lines of code) out. =] I don't expect this flag to stay around for very
long. It may not survive next week.
llvm-svn: 219046
2014-10-04 11:52:55 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
2014-10-02 04:19:32 +08:00
|
|
|
; SSE2-NEXT: psrad $24, %xmm0
|
2015-07-25 22:07:20 +08:00
|
|
|
; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
|
|
|
|
; SSE2-NEXT: psrad $24, %xmm1
|
2014-10-02 04:19:32 +08:00
|
|
|
; SSE2-NEXT: retq
|
2014-10-02 04:32:44 +08:00
|
|
|
;
|
2015-07-25 22:07:20 +08:00
|
|
|
; SSSE3-LABEL: load_sext_8i8_to_8i32:
|
2014-10-02 14:52:19 +08:00
|
|
|
; SSSE3: # BB#0: # %entry
|
2015-07-13 01:40:49 +08:00
|
|
|
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
|
[x86] Enable the new vector shuffle lowering by default.
Update the entire regression test suite for the new shuffles. Remove
most of the old testing which was devoted to the old shuffle lowering
path and is no longer relevant really. Also remove a few other random
tests that only really exercised shuffles and only incidently or without
any interesting aspects to them.
Benchmarking that I have done shows a few small regressions with this on
LNT, zero measurable regressions on real, large applications, and for
several benchmarks where the loop vectorizer fires in the hot path it
shows 5% to 40% improvements for SSE2 and SSE3 code running on Sandy
Bridge machines. Running on AMD machines shows even more dramatic
improvements.
When using newer ISA vector extensions the gains are much more modest,
but the code is still better on the whole. There are a few regressions
being tracked (PR21137, PR21138, PR21139) but by and large this is
expected to be a win for x86 generated code performance.
It is also more correct than the code it replaces. I have fuzz tested
this extensively with ISA extensions up through AVX2 and found no
crashes or miscompiles (yet...). The old lowering had a few miscompiles
and crashers after a somewhat smaller amount of fuzz testing.
There is one significant area where the new code path lags behind and
that is in AVX-512 support. However, there was *extremely little*
support for that already and so this isn't a significant step backwards
and the new framework will probably make it easier to implement lowering
that uses the full power of AVX-512's table-based shuffle+blend (IMO).
Many thanks to Quentin, Andrea, Robert, and others for benchmarking
assistance. Thanks to Adam and others for help with AVX-512. Thanks to
Hal, Eric, and *many* others for answering my incessant questions about
how the backend actually works. =]
I will leave the old code path in the tree until the 3 PRs above are at
least resolved to folks' satisfaction. Then I will rip it (and 1000s of
lines of code) out. =] I don't expect this flag to stay around for very
long. It may not survive next week.
llvm-svn: 219046
2014-10-04 11:52:55 +08:00
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
|
2014-10-02 04:32:44 +08:00
|
|
|
; SSSE3-NEXT: psrad $24, %xmm0
|
2015-07-25 22:07:20 +08:00
|
|
|
; SSSE3-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
|
|
|
|
; SSSE3-NEXT: psrad $24, %xmm1
|
2014-10-02 04:32:44 +08:00
|
|
|
; SSSE3-NEXT: retq
|
|
|
|
;
|
2015-07-25 22:07:20 +08:00
|
|
|
; SSE41-LABEL: load_sext_8i8_to_8i32:
|
2014-10-02 14:52:19 +08:00
|
|
|
; SSE41: # BB#0: # %entry
|
2014-10-02 04:41:36 +08:00
|
|
|
; SSE41-NEXT: pmovsxbd (%rdi), %xmm0
|
2015-07-25 22:07:20 +08:00
|
|
|
; SSE41-NEXT: pmovsxbd 4(%rdi), %xmm1
|
2014-10-02 04:41:36 +08:00
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
2015-07-25 22:07:20 +08:00
|
|
|
; AVX1-LABEL: load_sext_8i8_to_8i32:
|
|
|
|
; AVX1: # BB#0: # %entry
|
|
|
|
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
|
|
|
|
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm1
|
|
|
|
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
|
|
|
|
; AVX1-NEXT: vpmovsxwd %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
|
|
|
|
; AVX1-NEXT: retq
|
2014-10-02 04:49:54 +08:00
|
|
|
;
|
2015-07-25 22:07:20 +08:00
|
|
|
; AVX2-LABEL: load_sext_8i8_to_8i32:
|
|
|
|
; AVX2: # BB#0: # %entry
|
|
|
|
; AVX2-NEXT: vpmovsxbd (%rdi), %ymm0
|
|
|
|
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; X32-SSE41-LABEL: load_sext_8i8_to_8i32:
|
2014-10-02 14:52:19 +08:00
|
|
|
; X32-SSE41: # BB#0: # %entry
|
2014-10-02 04:49:54 +08:00
|
|
|
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; X32-SSE41-NEXT: pmovsxbd (%eax), %xmm0
|
2015-07-25 22:07:20 +08:00
|
|
|
; X32-SSE41-NEXT: pmovsxbd 4(%eax), %xmm1
|
2014-10-02 04:49:54 +08:00
|
|
|
; X32-SSE41-NEXT: retl
|
2014-10-02 14:52:19 +08:00
|
|
|
entry:
|
2015-07-25 22:07:20 +08:00
|
|
|
%X = load <8 x i8>, <8 x i8>* %ptr
|
|
|
|
%Y = sext <8 x i8> %X to <8 x i32>
|
|
|
|
ret <8 x i32> %Y
|
2012-12-19 15:50:20 +08:00
|
|
|
}
|
|
|
|
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-LABEL: load_sext_16i1_to_16i8:
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE2: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: pushq %rbp
|
|
|
|
; SSE2-NEXT: pushq %r15
|
|
|
|
; SSE2-NEXT: pushq %r14
|
|
|
|
; SSE2-NEXT: pushq %r13
|
|
|
|
; SSE2-NEXT: pushq %r12
|
|
|
|
; SSE2-NEXT: pushq %rbx
|
|
|
|
; SSE2-NEXT: movswq (%rdi), %rax
|
|
|
|
; SSE2-NEXT: movq %rax, %r8
|
|
|
|
; SSE2-NEXT: movq %rax, %r9
|
|
|
|
; SSE2-NEXT: movq %rax, %r10
|
|
|
|
; SSE2-NEXT: movq %rax, %r11
|
|
|
|
; SSE2-NEXT: movq %rax, %r14
|
|
|
|
; SSE2-NEXT: movq %rax, %r15
|
|
|
|
; SSE2-NEXT: movq %rax, %r12
|
|
|
|
; SSE2-NEXT: movq %rax, %r13
|
|
|
|
; SSE2-NEXT: movq %rax, %rbx
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: movq %rax, %rcx
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: movq %rax, %rdx
|
|
|
|
; SSE2-NEXT: movq %rax, %rsi
|
|
|
|
; SSE2-NEXT: movq %rax, %rdi
|
|
|
|
; SSE2-NEXT: movq %rax, %rbp
|
|
|
|
; SSE2-NEXT: shlq $49, %rbp
|
|
|
|
; SSE2-NEXT: sarq $63, %rbp
|
|
|
|
; SSE2-NEXT: movd %ebp, %xmm0
|
|
|
|
; SSE2-NEXT: movq %rax, %rbp
|
|
|
|
; SSE2-NEXT: movsbq %al, %rax
|
|
|
|
; SSE2-NEXT: shlq $57, %r8
|
|
|
|
; SSE2-NEXT: sarq $63, %r8
|
|
|
|
; SSE2-NEXT: movd %r8d, %xmm1
|
|
|
|
; SSE2-NEXT: shlq $53, %r9
|
|
|
|
; SSE2-NEXT: sarq $63, %r9
|
|
|
|
; SSE2-NEXT: movd %r9d, %xmm2
|
|
|
|
; SSE2-NEXT: shlq $61, %r10
|
|
|
|
; SSE2-NEXT: sarq $63, %r10
|
|
|
|
; SSE2-NEXT: movd %r10d, %xmm3
|
|
|
|
; SSE2-NEXT: shlq $51, %r11
|
|
|
|
; SSE2-NEXT: sarq $63, %r11
|
|
|
|
; SSE2-NEXT: movd %r11d, %xmm4
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: shlq $59, %r14
|
|
|
|
; SSE2-NEXT: sarq $63, %r14
|
|
|
|
; SSE2-NEXT: movd %r14d, %xmm5
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: shlq $55, %r15
|
|
|
|
; SSE2-NEXT: sarq $63, %r15
|
|
|
|
; SSE2-NEXT: movd %r15d, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
|
|
|
|
; SSE2-NEXT: shlq $63, %r12
|
|
|
|
; SSE2-NEXT: sarq $63, %r12
|
|
|
|
; SSE2-NEXT: movd %r12d, %xmm0
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
|
|
|
|
; SSE2-NEXT: shlq $50, %r13
|
|
|
|
; SSE2-NEXT: sarq $63, %r13
|
|
|
|
; SSE2-NEXT: movd %r13d, %xmm1
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: shlq $58, %rbx
|
|
|
|
; SSE2-NEXT: sarq $63, %rbx
|
|
|
|
; SSE2-NEXT: movd %ebx, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: shlq $54, %rcx
|
|
|
|
; SSE2-NEXT: sarq $63, %rcx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm4
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: shlq $62, %rdx
|
|
|
|
; SSE2-NEXT: sarq $63, %rdx
|
|
|
|
; SSE2-NEXT: movd %edx, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
|
|
|
|
; SSE2-NEXT: shlq $52, %rsi
|
|
|
|
; SSE2-NEXT: sarq $63, %rsi
|
|
|
|
; SSE2-NEXT: movd %esi, %xmm1
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
|
|
|
|
; SSE2-NEXT: shlq $60, %rdi
|
|
|
|
; SSE2-NEXT: sarq $63, %rdi
|
|
|
|
; SSE2-NEXT: movd %edi, %xmm4
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
|
|
|
|
; SSE2-NEXT: shrq $15, %rbp
|
|
|
|
; SSE2-NEXT: movd %ebp, %xmm1
|
|
|
|
; SSE2-NEXT: shrq $7, %rax
|
|
|
|
; SSE2-NEXT: movd %eax, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: popq %rbx
|
|
|
|
; SSE2-NEXT: popq %r12
|
|
|
|
; SSE2-NEXT: popq %r13
|
|
|
|
; SSE2-NEXT: popq %r14
|
|
|
|
; SSE2-NEXT: popq %r15
|
|
|
|
; SSE2-NEXT: popq %rbp
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSSE3-LABEL: load_sext_16i1_to_16i8:
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSSE3: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSSE3-NEXT: pushq %rbp
|
|
|
|
; SSSE3-NEXT: pushq %r15
|
|
|
|
; SSSE3-NEXT: pushq %r14
|
|
|
|
; SSSE3-NEXT: pushq %r13
|
|
|
|
; SSSE3-NEXT: pushq %r12
|
|
|
|
; SSSE3-NEXT: pushq %rbx
|
|
|
|
; SSSE3-NEXT: movswq (%rdi), %rax
|
|
|
|
; SSSE3-NEXT: movq %rax, %r8
|
|
|
|
; SSSE3-NEXT: movq %rax, %r9
|
|
|
|
; SSSE3-NEXT: movq %rax, %r10
|
|
|
|
; SSSE3-NEXT: movq %rax, %r11
|
|
|
|
; SSSE3-NEXT: movq %rax, %r14
|
|
|
|
; SSSE3-NEXT: movq %rax, %r15
|
|
|
|
; SSSE3-NEXT: movq %rax, %r12
|
|
|
|
; SSSE3-NEXT: movq %rax, %r13
|
|
|
|
; SSSE3-NEXT: movq %rax, %rbx
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSSE3-NEXT: movq %rax, %rcx
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSSE3-NEXT: movq %rax, %rdx
|
|
|
|
; SSSE3-NEXT: movq %rax, %rsi
|
|
|
|
; SSSE3-NEXT: movq %rax, %rdi
|
|
|
|
; SSSE3-NEXT: movq %rax, %rbp
|
|
|
|
; SSSE3-NEXT: shlq $49, %rbp
|
|
|
|
; SSSE3-NEXT: sarq $63, %rbp
|
|
|
|
; SSSE3-NEXT: movd %ebp, %xmm0
|
|
|
|
; SSSE3-NEXT: movq %rax, %rbp
|
|
|
|
; SSSE3-NEXT: movsbq %al, %rax
|
|
|
|
; SSSE3-NEXT: shlq $57, %r8
|
|
|
|
; SSSE3-NEXT: sarq $63, %r8
|
|
|
|
; SSSE3-NEXT: movd %r8d, %xmm1
|
|
|
|
; SSSE3-NEXT: shlq $53, %r9
|
|
|
|
; SSSE3-NEXT: sarq $63, %r9
|
|
|
|
; SSSE3-NEXT: movd %r9d, %xmm2
|
|
|
|
; SSSE3-NEXT: shlq $61, %r10
|
|
|
|
; SSSE3-NEXT: sarq $63, %r10
|
|
|
|
; SSSE3-NEXT: movd %r10d, %xmm3
|
|
|
|
; SSSE3-NEXT: shlq $51, %r11
|
|
|
|
; SSSE3-NEXT: sarq $63, %r11
|
|
|
|
; SSSE3-NEXT: movd %r11d, %xmm4
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSSE3-NEXT: shlq $59, %r14
|
|
|
|
; SSSE3-NEXT: sarq $63, %r14
|
|
|
|
; SSSE3-NEXT: movd %r14d, %xmm5
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: shlq $55, %r15
|
|
|
|
; SSSE3-NEXT: sarq $63, %r15
|
|
|
|
; SSSE3-NEXT: movd %r15d, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3],xmm3[4],xmm1[4],xmm3[5],xmm1[5],xmm3[6],xmm1[6],xmm3[7],xmm1[7]
|
|
|
|
; SSSE3-NEXT: shlq $63, %r12
|
|
|
|
; SSSE3-NEXT: sarq $63, %r12
|
|
|
|
; SSSE3-NEXT: movd %r12d, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3],xmm5[4],xmm4[4],xmm5[5],xmm4[5],xmm5[6],xmm4[6],xmm5[7],xmm4[7]
|
|
|
|
; SSSE3-NEXT: shlq $50, %r13
|
|
|
|
; SSSE3-NEXT: sarq $63, %r13
|
|
|
|
; SSSE3-NEXT: movd %r13d, %xmm1
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSSE3-NEXT: shlq $58, %rbx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rbx
|
|
|
|
; SSSE3-NEXT: movd %ebx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSSE3-NEXT: shlq $54, %rcx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rcx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm4
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSSE3-NEXT: shlq $62, %rdx
|
|
|
|
; SSSE3-NEXT: sarq $63, %rdx
|
|
|
|
; SSSE3-NEXT: movd %edx, %xmm3
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
|
|
|
|
; SSSE3-NEXT: shlq $52, %rsi
|
|
|
|
; SSSE3-NEXT: sarq $63, %rsi
|
|
|
|
; SSSE3-NEXT: movd %esi, %xmm1
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
|
|
|
|
; SSSE3-NEXT: shlq $60, %rdi
|
|
|
|
; SSSE3-NEXT: sarq $63, %rdi
|
|
|
|
; SSSE3-NEXT: movd %edi, %xmm4
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1],xmm4[2],xmm1[2],xmm4[3],xmm1[3],xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
|
|
|
|
; SSSE3-NEXT: shrq $15, %rbp
|
|
|
|
; SSSE3-NEXT: movd %ebp, %xmm1
|
|
|
|
; SSSE3-NEXT: shrq $7, %rax
|
|
|
|
; SSSE3-NEXT: movd %eax, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1],xmm3[2],xmm4[2],xmm3[3],xmm4[3],xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
|
|
|
; SSSE3-NEXT: popq %rbx
|
|
|
|
; SSSE3-NEXT: popq %r12
|
|
|
|
; SSSE3-NEXT: popq %r13
|
|
|
|
; SSSE3-NEXT: popq %r14
|
|
|
|
; SSSE3-NEXT: popq %r15
|
|
|
|
; SSSE3-NEXT: popq %rbp
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSSE3-NEXT: retq
|
|
|
|
;
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE41-LABEL: load_sext_16i1_to_16i8:
|
2015-09-12 23:36:41 +08:00
|
|
|
; SSE41: # BB#0: # %entry
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE41-NEXT: movswq (%rdi), %rax
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $62, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: movq %rax, %rdx
|
|
|
|
; SSE41-NEXT: shlq $63, %rdx
|
|
|
|
; SSE41-NEXT: sarq $63, %rdx
|
|
|
|
; SSE41-NEXT: movd %edx, %xmm0
|
|
|
|
; SSE41-NEXT: pinsrb $1, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $61, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $2, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $60, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $3, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $59, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $4, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $58, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $5, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $57, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $6, %ecx, %xmm0
; SSE41-NEXT: movsbq %al, %rcx
; SSE41-NEXT: shrq $7, %rcx
; SSE41-NEXT: pinsrb $7, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $55, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $8, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $54, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $9, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $53, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $10, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $52, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $11, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $51, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $12, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $50, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $13, %ecx, %xmm0
|
|
|
|
; SSE41-NEXT: movq %rax, %rcx
|
|
|
|
; SSE41-NEXT: shlq $49, %rcx
|
|
|
|
; SSE41-NEXT: sarq $63, %rcx
|
|
|
|
; SSE41-NEXT: pinsrb $14, %ecx, %xmm0
; SSE41-NEXT: shrq $15, %rax
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX-LABEL: load_sext_16i1_to_16i8:
|
|
|
|
; AVX: # BB#0: # %entry
; AVX-NEXT: movswq (%rdi), %rax
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $62, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: movq %rax, %rdx
|
|
|
|
; AVX-NEXT: shlq $63, %rdx
|
|
|
|
; AVX-NEXT: sarq $63, %rdx
|
|
|
|
; AVX-NEXT: vmovd %edx, %xmm0
|
|
|
|
; AVX-NEXT: vpinsrb $1, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $61, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $2, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $60, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $3, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $59, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $58, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $5, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $57, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $6, %ecx, %xmm0, %xmm0
; AVX-NEXT: movsbq %al, %rcx
; AVX-NEXT: shrq $7, %rcx
; AVX-NEXT: vpinsrb $7, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $55, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $8, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $54, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $9, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $53, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $10, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $52, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $11, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $51, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $12, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $50, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $13, %ecx, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: movq %rax, %rcx
|
|
|
|
; AVX-NEXT: shlq $49, %rcx
|
|
|
|
; AVX-NEXT: sarq $63, %rcx
|
|
|
|
; AVX-NEXT: vpinsrb $14, %ecx, %xmm0, %xmm0
; AVX-NEXT: shrq $15, %rax
; AVX-NEXT: vpinsrb $15, %eax, %xmm0, %xmm0
|
|
|
|
; AVX-NEXT: retq
|
|
|
|
;
|
|
|
|
; X32-SSE41-LABEL: load_sext_16i1_to_16i8:
|
|
|
|
; X32-SSE41: # BB#0: # %entry
|
|
|
|
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movswl (%eax), %eax
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $30, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %edx
|
|
|
|
; X32-SSE41-NEXT: shll $31, %edx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %edx
|
|
|
|
; X32-SSE41-NEXT: movd %edx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: pinsrb $1, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $29, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $2, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $28, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $3, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $27, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $4, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $26, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $5, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $25, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $6, %ecx, %xmm0
; X32-SSE41-NEXT: movsbl %al, %ecx
; X32-SSE41-NEXT: shrl $7, %ecx
; X32-SSE41-NEXT: pinsrb $7, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $23, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $8, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $22, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $9, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $21, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $10, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $20, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $11, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $19, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $12, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $18, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $13, %ecx, %xmm0
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shll $17, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $14, %ecx, %xmm0
; X32-SSE41-NEXT: shrl $15, %eax
; X32-SSE41-NEXT: pinsrb $15, %eax, %xmm0
|
|
|
|
; X32-SSE41-NEXT: retl
|
|
|
|
entry:
|
|
|
|
%X = load <16 x i1>, <16 x i1>* %ptr
|
|
|
|
%Y = sext <16 x i1> %X to <16 x i8>
|
|
|
|
ret <16 x i8> %Y
|
|
|
|
}
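
; Widening the <16 x i1> mask load to <16 x i16>: each bit is extracted and
; sign-extended individually since there is no native i1 vector type here.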
define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
|
|
|
|
; SSE2-LABEL: load_sext_16i1_to_16i16:
|
|
|
|
; SSE2: # BB#0: # %entry
|
|
|
|
; SSE2-NEXT: movzwl (%rdi), %eax
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $14, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $6, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $10, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $2, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $12, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $4, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $8, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $13, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $5, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $9, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $11, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $3, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: movl %eax, %ecx
|
|
|
|
; SSE2-NEXT: shrl $7, %ecx
|
|
|
|
; SSE2-NEXT: andl $1, %ecx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSE2-NEXT: shrl $15, %eax
|
|
|
|
; SSE2-NEXT: movzwl %ax, %eax
|
|
|
|
; SSE2-NEXT: movd %eax, %xmm4
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSE2-NEXT: psllw $15, %xmm0
|
|
|
|
; SSE2-NEXT: psraw $15, %xmm0
|
|
|
|
; SSE2-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
|
|
|
|
; SSE2-NEXT: psllw $15, %xmm1
|
|
|
|
; SSE2-NEXT: psraw $15, %xmm1
|
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSSE3-LABEL: load_sext_16i1_to_16i16:
|
|
|
|
; SSSE3: # BB#0: # %entry
|
|
|
|
; SSSE3-NEXT: movzwl (%rdi), %eax
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $14, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $6, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $10, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $2, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $12, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $4, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1],xmm3[2],xmm0[2],xmm3[3],xmm0[3],xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm1
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $8, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $13, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $5, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3],xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $9, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $11, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $3, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm3
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: movl %eax, %ecx
|
|
|
|
; SSSE3-NEXT: shrl $7, %ecx
|
|
|
|
; SSSE3-NEXT: andl $1, %ecx
|
|
|
|
; SSSE3-NEXT: movd %ecx, %xmm2
|
|
|
|
; SSSE3-NEXT: shrl $15, %eax
|
|
|
|
; SSSE3-NEXT: movzwl %ax, %eax
|
|
|
|
; SSSE3-NEXT: movd %eax, %xmm4
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1],xmm2[2],xmm4[2],xmm2[3],xmm4[3],xmm2[4],xmm4[4],xmm2[5],xmm4[5],xmm2[6],xmm4[6],xmm2[7],xmm4[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
|
|
|
|
; SSSE3-NEXT: movdqa %xmm1, %xmm0
|
|
|
|
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
|
|
|
|
; SSSE3-NEXT: psllw $15, %xmm0
|
|
|
|
; SSSE3-NEXT: psraw $15, %xmm0
|
|
|
|
; SSSE3-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15]
|
|
|
|
; SSSE3-NEXT: psllw $15, %xmm1
|
|
|
|
; SSSE3-NEXT: psraw $15, %xmm1
|
|
|
|
; SSSE3-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSE41-LABEL: load_sext_16i1_to_16i16:
|
|
|
|
; SSE41: # BB#0: # %entry
|
|
|
|
; SSE41-NEXT: movzwl (%rdi), %eax
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: movl %eax, %edx
|
|
|
|
; SSE41-NEXT: andl $1, %edx
|
|
|
|
; SSE41-NEXT: movd %edx, %xmm1
|
|
|
|
; SSE41-NEXT: pinsrb $1, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $2, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $2, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $3, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $3, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $4, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $4, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $5, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $5, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $6, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $6, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $7, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $7, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $8, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $8, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $9, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $9, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $10, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $10, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $11, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $11, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $12, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $12, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $13, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $13, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; SSE41-NEXT: shrl $14, %ecx
|
|
|
|
; SSE41-NEXT: andl $1, %ecx
|
|
|
|
; SSE41-NEXT: pinsrb $14, %ecx, %xmm1
|
|
|
|
; SSE41-NEXT: shrl $15, %eax
|
|
|
|
; SSE41-NEXT: movzwl %ax, %eax
|
|
|
|
; SSE41-NEXT: pinsrb $15, %eax, %xmm1
|
|
|
|
; SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
|
|
; SSE41-NEXT: psllw $15, %xmm0
|
|
|
|
; SSE41-NEXT: psraw $15, %xmm0
|
|
|
|
; SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
|
|
; SSE41-NEXT: psllw $15, %xmm1
|
|
|
|
; SSE41-NEXT: psraw $15, %xmm1
|
|
|
|
; SSE41-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX1-LABEL: load_sext_16i1_to_16i16:
|
|
|
|
; AVX1: # BB#0: # %entry
; AVX1-NEXT: pushq %rbp
|
|
|
|
; AVX1-NEXT: .Ltmp0:
|
|
|
|
; AVX1-NEXT: .cfi_def_cfa_offset 16
|
|
|
|
; AVX1-NEXT: pushq %r15
|
|
|
|
; AVX1-NEXT: .Ltmp1:
|
|
|
|
; AVX1-NEXT: .cfi_def_cfa_offset 24
|
|
|
|
; AVX1-NEXT: pushq %r14
|
|
|
|
; AVX1-NEXT: .Ltmp2:
|
|
|
|
; AVX1-NEXT: .cfi_def_cfa_offset 32
|
|
|
|
; AVX1-NEXT: pushq %r13
|
|
|
|
; AVX1-NEXT: .Ltmp3:
|
|
|
|
; AVX1-NEXT: .cfi_def_cfa_offset 40
|
|
|
|
; AVX1-NEXT: pushq %r12
|
|
|
|
; AVX1-NEXT: .Ltmp4:
|
|
|
|
; AVX1-NEXT: .cfi_def_cfa_offset 48
|
|
|
|
; AVX1-NEXT: pushq %rbx
|
|
|
|
; AVX1-NEXT: .Ltmp5:
|
|
|
|
; AVX1-NEXT: .cfi_def_cfa_offset 56
|
|
|
|
; AVX1-NEXT: .Ltmp6:
|
|
|
|
; AVX1-NEXT: .cfi_offset %rbx, -56
|
|
|
|
; AVX1-NEXT: .Ltmp7:
|
|
|
|
; AVX1-NEXT: .cfi_offset %r12, -48
|
|
|
|
; AVX1-NEXT: .Ltmp8:
|
|
|
|
; AVX1-NEXT: .cfi_offset %r13, -40
|
|
|
|
; AVX1-NEXT: .Ltmp9:
|
|
|
|
; AVX1-NEXT: .cfi_offset %r14, -32
|
|
|
|
; AVX1-NEXT: .Ltmp10:
|
|
|
|
; AVX1-NEXT: .cfi_offset %r15, -24
|
|
|
|
; AVX1-NEXT: .Ltmp11:
|
|
|
|
; AVX1-NEXT: .cfi_offset %rbp, -16
|
|
|
|
; AVX1-NEXT: movswq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $55, %rcx
; AVX1-NEXT: sarq $63, %rcx
; AVX1-NEXT: vmovd %ecx, %xmm0
|
|
|
|
; AVX1-NEXT: movq %rax, %r8
|
|
|
|
; AVX1-NEXT: movq %rax, %r10
|
|
|
|
; AVX1-NEXT: movq %rax, %r11
|
|
|
|
; AVX1-NEXT: movq %rax, %r14
|
|
|
|
; AVX1-NEXT: movq %rax, %r15
|
|
|
|
; AVX1-NEXT: movq %rax, %r9
|
|
|
|
; AVX1-NEXT: movq %rax, %r12
|
|
|
|
; AVX1-NEXT: movq %rax, %r13
|
|
|
|
; AVX1-NEXT: movq %rax, %rbx
|
|
|
|
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movq %rax, %rsi
|
|
|
|
; AVX1-NEXT: movsbq %al, %rbp
|
|
|
|
; AVX1-NEXT: shlq $54, %rax
|
|
|
|
; AVX1-NEXT: sarq $63, %rax
|
|
|
|
; AVX1-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shlq $53, %r8
|
|
|
|
; AVX1-NEXT: sarq $63, %r8
|
|
|
|
; AVX1-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shlq $52, %r10
|
|
|
|
; AVX1-NEXT: sarq $63, %r10
|
|
|
|
; AVX1-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shlq $51, %r11
|
|
|
|
; AVX1-NEXT: sarq $63, %r11
|
|
|
|
; AVX1-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shlq $50, %r14
|
|
|
|
; AVX1-NEXT: sarq $63, %r14
|
|
|
|
; AVX1-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shlq $49, %r15
|
|
|
|
; AVX1-NEXT: sarq $63, %r15
|
|
|
|
; AVX1-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shrq $15, %r9
|
|
|
|
; AVX1-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
|
|
|
|
; AVX1-NEXT: shlq $63, %r13
|
|
|
|
; AVX1-NEXT: sarq $63, %r13
|
|
|
|
; AVX1-NEXT: vmovd %r13d, %xmm1
|
|
|
|
; AVX1-NEXT: shlq $62, %r12
|
|
|
|
; AVX1-NEXT: sarq $63, %r12
|
|
|
|
; AVX1-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: shlq $61, %rbx
|
|
|
|
; AVX1-NEXT: sarq $63, %rbx
|
|
|
|
; AVX1-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: shlq $60, %rdi
|
|
|
|
; AVX1-NEXT: sarq $63, %rdi
|
|
|
|
; AVX1-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1
; AVX1-NEXT: shlq $59, %rcx
; AVX1-NEXT: sarq $63, %rcx
; AVX1-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
; AVX1-NEXT: shlq $58, %rdx
|
|
|
|
; AVX1-NEXT: sarq $63, %rdx
|
|
|
|
; AVX1-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: shlq $57, %rsi
|
|
|
|
; AVX1-NEXT: sarq $63, %rsi
|
|
|
|
; AVX1-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1
|
|
|
|
; AVX1-NEXT: shrq $7, %rbp
|
|
|
|
; AVX1-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: popq %rbx
|
|
|
|
; AVX1-NEXT: popq %r12
|
|
|
|
; AVX1-NEXT: popq %r13
|
|
|
|
; AVX1-NEXT: popq %r14
|
|
|
|
; AVX1-NEXT: popq %r15
|
|
|
|
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
|
|
|
|
;
|
|
|
|
; AVX2-LABEL: load_sext_16i1_to_16i16:
|
|
|
|
; AVX2: # BB#0: # %entry
; AVX2-NEXT: pushq %rbp
|
|
|
|
; AVX2-NEXT: .Ltmp0:
|
|
|
|
; AVX2-NEXT: .cfi_def_cfa_offset 16
|
|
|
|
; AVX2-NEXT: pushq %r15
|
|
|
|
; AVX2-NEXT: .Ltmp1:
|
|
|
|
; AVX2-NEXT: .cfi_def_cfa_offset 24
|
|
|
|
; AVX2-NEXT: pushq %r14
|
|
|
|
; AVX2-NEXT: .Ltmp2:
|
|
|
|
; AVX2-NEXT: .cfi_def_cfa_offset 32
|
|
|
|
; AVX2-NEXT: pushq %r13
|
|
|
|
; AVX2-NEXT: .Ltmp3:
|
|
|
|
; AVX2-NEXT: .cfi_def_cfa_offset 40
|
|
|
|
; AVX2-NEXT: pushq %r12
|
|
|
|
; AVX2-NEXT: .Ltmp4:
|
|
|
|
; AVX2-NEXT: .cfi_def_cfa_offset 48
|
|
|
|
; AVX2-NEXT: pushq %rbx
|
|
|
|
; AVX2-NEXT: .Ltmp5:
|
|
|
|
; AVX2-NEXT: .cfi_def_cfa_offset 56
|
|
|
|
; AVX2-NEXT: .Ltmp6:
|
|
|
|
; AVX2-NEXT: .cfi_offset %rbx, -56
|
|
|
|
; AVX2-NEXT: .Ltmp7:
|
|
|
|
; AVX2-NEXT: .cfi_offset %r12, -48
|
|
|
|
; AVX2-NEXT: .Ltmp8:
|
|
|
|
; AVX2-NEXT: .cfi_offset %r13, -40
|
|
|
|
; AVX2-NEXT: .Ltmp9:
|
|
|
|
; AVX2-NEXT: .cfi_offset %r14, -32
|
|
|
|
; AVX2-NEXT: .Ltmp10:
|
|
|
|
; AVX2-NEXT: .cfi_offset %r15, -24
|
|
|
|
; AVX2-NEXT: .Ltmp11:
|
|
|
|
; AVX2-NEXT: .cfi_offset %rbp, -16
|
|
|
|
; AVX2-NEXT: movswq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $55, %rcx
; AVX2-NEXT: sarq $63, %rcx
; AVX2-NEXT: vmovd %ecx, %xmm0
|
|
|
|
; AVX2-NEXT: movq %rax, %r8
|
|
|
|
; AVX2-NEXT: movq %rax, %r10
|
|
|
|
; AVX2-NEXT: movq %rax, %r11
|
|
|
|
; AVX2-NEXT: movq %rax, %r14
|
|
|
|
; AVX2-NEXT: movq %rax, %r15
|
|
|
|
; AVX2-NEXT: movq %rax, %r9
|
|
|
|
; AVX2-NEXT: movq %rax, %r12
|
|
|
|
; AVX2-NEXT: movq %rax, %r13
|
|
|
|
; AVX2-NEXT: movq %rax, %rbx
|
|
|
|
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movq %rax, %rsi
|
|
|
|
; AVX2-NEXT: movsbq %al, %rbp
|
|
|
|
; AVX2-NEXT: shlq $54, %rax
|
|
|
|
; AVX2-NEXT: sarq $63, %rax
|
|
|
|
; AVX2-NEXT: vpinsrw $1, %eax, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shlq $53, %r8
|
|
|
|
; AVX2-NEXT: sarq $63, %r8
|
|
|
|
; AVX2-NEXT: vpinsrw $2, %r8d, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shlq $52, %r10
|
|
|
|
; AVX2-NEXT: sarq $63, %r10
|
|
|
|
; AVX2-NEXT: vpinsrw $3, %r10d, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shlq $51, %r11
|
|
|
|
; AVX2-NEXT: sarq $63, %r11
|
|
|
|
; AVX2-NEXT: vpinsrw $4, %r11d, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shlq $50, %r14
|
|
|
|
; AVX2-NEXT: sarq $63, %r14
|
|
|
|
; AVX2-NEXT: vpinsrw $5, %r14d, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shlq $49, %r15
|
|
|
|
; AVX2-NEXT: sarq $63, %r15
|
|
|
|
; AVX2-NEXT: vpinsrw $6, %r15d, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shrq $15, %r9
|
|
|
|
; AVX2-NEXT: vpinsrw $7, %r9d, %xmm0, %xmm0
|
|
|
|
; AVX2-NEXT: shlq $63, %r13
|
|
|
|
; AVX2-NEXT: sarq $63, %r13
|
|
|
|
; AVX2-NEXT: vmovd %r13d, %xmm1
|
|
|
|
; AVX2-NEXT: shlq $62, %r12
|
|
|
|
; AVX2-NEXT: sarq $63, %r12
|
|
|
|
; AVX2-NEXT: vpinsrw $1, %r12d, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: shlq $61, %rbx
|
|
|
|
; AVX2-NEXT: sarq $63, %rbx
|
|
|
|
; AVX2-NEXT: vpinsrw $2, %ebx, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: shlq $60, %rdi
|
|
|
|
; AVX2-NEXT: sarq $63, %rdi
|
|
|
|
; AVX2-NEXT: vpinsrw $3, %edi, %xmm1, %xmm1
; AVX2-NEXT: shlq $59, %rcx
; AVX2-NEXT: sarq $63, %rcx
; AVX2-NEXT: vpinsrw $4, %ecx, %xmm1, %xmm1
; AVX2-NEXT: shlq $58, %rdx
|
|
|
|
; AVX2-NEXT: sarq $63, %rdx
|
|
|
|
; AVX2-NEXT: vpinsrw $5, %edx, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: shlq $57, %rsi
|
|
|
|
; AVX2-NEXT: sarq $63, %rsi
|
|
|
|
; AVX2-NEXT: vpinsrw $6, %esi, %xmm1, %xmm1
|
|
|
|
; AVX2-NEXT: shrq $7, %rbp
|
|
|
|
; AVX2-NEXT: vpinsrw $7, %ebp, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: popq %rbx
|
|
|
|
; AVX2-NEXT: popq %r12
|
|
|
|
; AVX2-NEXT: popq %r13
|
|
|
|
; AVX2-NEXT: popq %r14
|
|
|
|
; AVX2-NEXT: popq %r15
|
|
|
|
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
|
|
|
|
;
|
|
|
|
; X32-SSE41-LABEL: load_sext_16i1_to_16i16:
|
|
|
|
; X32-SSE41: # BB#0: # %entry
|
|
|
|
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
|
|
|
|
; X32-SSE41-NEXT: movzwl (%eax), %eax
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %edx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %edx
|
|
|
|
; X32-SSE41-NEXT: movd %edx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: pinsrb $1, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $2, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $2, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $3, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $3, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $4, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $4, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $5, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $5, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $6, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $6, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $7, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $7, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $8, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $8, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $9, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $9, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $10, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $10, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $11, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $11, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $12, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $12, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $13, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $13, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $14, %ecx
|
|
|
|
; X32-SSE41-NEXT: andl $1, %ecx
|
|
|
|
; X32-SSE41-NEXT: pinsrb $14, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: shrl $15, %eax
|
|
|
|
; X32-SSE41-NEXT: pinsrb $15, %eax, %xmm1
|
|
|
|
; X32-SSE41-NEXT: pmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
|
|
|
|
; X32-SSE41-NEXT: psllw $15, %xmm0
|
|
|
|
; X32-SSE41-NEXT: psraw $15, %xmm0
|
|
|
|
; X32-SSE41-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
|
|
|
|
; X32-SSE41-NEXT: psllw $15, %xmm1
|
|
|
|
; X32-SSE41-NEXT: psraw $15, %xmm1
|
|
|
|
; X32-SSE41-NEXT: retl
|
|
|
|
entry:
|
|
|
|
%X = load <16 x i1>, <16 x i1>* %ptr
|
|
|
|
%Y = sext <16 x i1> %X to <16 x i16>
|
|
|
|
ret <16 x i16> %Y
|
|
|
|
}
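
; The <32 x i1> case reads the mask as two 16-bit halves and expands each half
; into a vector of sign-extended bits.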
define <32 x i8> @load_sext_32i1_to_32i8(<32 x i1> *%ptr) nounwind readnone {
; SSE2-LABEL: load_sext_32i1_to_32i8:
; SSE2-LABEL: load_sext_32i1_to_32i8:
|
|
|
|
; SSE2: # BB#0: # %entry
; SSE2-NEXT: pushq %rbp
|
|
|
|
; SSE2-NEXT: pushq %r15
|
|
|
|
; SSE2-NEXT: pushq %r14
|
|
|
|
; SSE2-NEXT: pushq %r13
|
|
|
|
; SSE2-NEXT: pushq %r12
|
|
|
|
; SSE2-NEXT: pushq %rbx
|
|
|
|
; SSE2-NEXT: movswq (%rdi), %rbx
|
|
|
|
; SSE2-NEXT: movq %rbx, %r10
|
|
|
|
; SSE2-NEXT: movq %rbx, %r8
|
|
|
|
; SSE2-NEXT: movq %rbx, %r9
|
|
|
|
; SSE2-NEXT: movq %rbx, %r11
|
|
|
|
; SSE2-NEXT: movq %rbx, %r14
|
|
|
|
; SSE2-NEXT: movq %rbx, %r15
|
|
|
|
; SSE2-NEXT: movq %rbx, %r12
|
|
|
|
; SSE2-NEXT: movq %rbx, %r13
|
|
|
|
; SSE2-NEXT: movq %rbx, %rdx
|
|
|
|
; SSE2-NEXT: movq %rbx, %rsi
|
|
|
|
; SSE2-NEXT: movq %rbx, %rcx
|
|
|
|
; SSE2-NEXT: movq %rbx, %rbp
|
|
|
|
; SSE2-NEXT: movq %rbx, %rax
|
|
|
|
; SSE2-NEXT: shlq $49, %rax
|
|
|
|
; SSE2-NEXT: sarq $63, %rax
|
|
|
|
; SSE2-NEXT: movd %eax, %xmm0
|
|
|
|
; SSE2-NEXT: movq %rbx, %rax
|
|
|
|
; SSE2-NEXT: shlq $57, %r10
|
|
|
|
; SSE2-NEXT: sarq $63, %r10
|
|
|
|
; SSE2-NEXT: movd %r10d, %xmm15
|
|
|
|
; SSE2-NEXT: movq %rbx, %r10
|
|
|
|
; SSE2-NEXT: movsbq %bl, %rbx
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
|
|
|
|
; SSE2-NEXT: shlq $53, %r8
|
|
|
|
; SSE2-NEXT: sarq $63, %r8
|
|
|
|
; SSE2-NEXT: movd %r8d, %xmm8
|
|
|
|
; SSE2-NEXT: shlq $61, %r9
|
|
|
|
; SSE2-NEXT: sarq $63, %r9
|
|
|
|
; SSE2-NEXT: movd %r9d, %xmm2
|
|
|
|
; SSE2-NEXT: shlq $51, %r11
|
|
|
|
; SSE2-NEXT: sarq $63, %r11
|
|
|
|
; SSE2-NEXT: movd %r11d, %xmm9
|
|
|
|
; SSE2-NEXT: shlq $59, %r14
|
|
|
|
; SSE2-NEXT: sarq $63, %r14
|
|
|
|
; SSE2-NEXT: movd %r14d, %xmm5
|
|
|
|
; SSE2-NEXT: shlq $55, %r15
|
|
|
|
; SSE2-NEXT: sarq $63, %r15
|
|
|
|
; SSE2-NEXT: movd %r15d, %xmm10
|
|
|
|
; SSE2-NEXT: shlq $63, %r12
|
|
|
|
; SSE2-NEXT: sarq $63, %r12
|
|
|
|
; SSE2-NEXT: movd %r12d, %xmm0
|
|
|
|
; SSE2-NEXT: shlq $50, %r13
|
|
|
|
; SSE2-NEXT: sarq $63, %r13
|
|
|
|
; SSE2-NEXT: movd %r13d, %xmm11
|
|
|
|
; SSE2-NEXT: shlq $58, %rdx
|
|
|
|
; SSE2-NEXT: sarq $63, %rdx
|
|
|
|
; SSE2-NEXT: movd %edx, %xmm4
|
|
|
|
; SSE2-NEXT: shlq $54, %rsi
|
|
|
|
; SSE2-NEXT: sarq $63, %rsi
|
|
|
|
; SSE2-NEXT: movd %esi, %xmm12
|
|
|
|
; SSE2-NEXT: shlq $62, %rcx
; SSE2-NEXT: sarq $63, %rcx
; SSE2-NEXT: movd %ecx, %xmm6
|
|
|
|
; SSE2-NEXT: shlq $52, %rbp
|
|
|
|
; SSE2-NEXT: sarq $63, %rbp
|
|
|
|
; SSE2-NEXT: movd %ebp, %xmm13
|
|
|
|
; SSE2-NEXT: shlq $60, %rax
; SSE2-NEXT: sarq $63, %rax
; SSE2-NEXT: movd %eax, %xmm7
|
|
|
|
; SSE2-NEXT: shrq $15, %r10
|
|
|
|
; SSE2-NEXT: movd %r10d, %xmm14
|
|
|
|
; SSE2-NEXT: shrq $7, %rbx
|
|
|
|
; SSE2-NEXT: movd %ebx, %xmm3
|
|
|
|
; SSE2-NEXT: movswq 2(%rdi), %rdx
|
|
|
|
; SSE2-NEXT: movq %rdx, %r8
|
|
|
|
; SSE2-NEXT: movq %rdx, %r9
|
|
|
|
; SSE2-NEXT: movq %rdx, %r10
|
|
|
|
; SSE2-NEXT: movq %rdx, %r11
|
|
|
|
; SSE2-NEXT: movq %rdx, %r14
|
|
|
|
; SSE2-NEXT: movq %rdx, %r15
|
|
|
|
; SSE2-NEXT: movq %rdx, %r12
|
|
|
|
; SSE2-NEXT: movq %rdx, %r13
|
|
|
|
; SSE2-NEXT: movq %rdx, %rbx
|
|
|
|
; SSE2-NEXT: movq %rdx, %rax
|
|
|
|
; SSE2-NEXT: movq %rdx, %rcx
|
|
|
|
; SSE2-NEXT: movq %rdx, %rsi
|
|
|
|
; SSE2-NEXT: movq %rdx, %rdi
|
|
|
|
; SSE2-NEXT: movq %rdx, %rbp
|
|
|
|
; SSE2-NEXT: shlq $49, %rbp
|
|
|
|
; SSE2-NEXT: sarq $63, %rbp
|
|
|
|
; SSE2-NEXT: movd %ebp, %xmm1
|
|
|
|
; SSE2-NEXT: movq %rdx, %rbp
|
|
|
|
; SSE2-NEXT: movsbq %dl, %rdx
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3],xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
|
|
|
|
; SSE2-NEXT: shlq $57, %r8
|
|
|
|
; SSE2-NEXT: sarq $63, %r8
|
|
|
|
; SSE2-NEXT: movd %r8d, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: shlq $53, %r9
|
|
|
|
; SSE2-NEXT: sarq $63, %r9
|
|
|
|
; SSE2-NEXT: movd %r9d, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
|
|
|
|
; SSE2-NEXT: shlq $61, %r10
|
|
|
|
; SSE2-NEXT: sarq $63, %r10
|
|
|
|
; SSE2-NEXT: movd %r10d, %xmm4
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
|
|
|
|
; SSE2-NEXT: shlq $51, %r11
|
|
|
|
; SSE2-NEXT: sarq $63, %r11
|
|
|
|
; SSE2-NEXT: movd %r11d, %xmm5
; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT: shlq $59, %r14
|
|
|
|
; SSE2-NEXT: sarq $63, %r14
|
|
|
|
; SSE2-NEXT: movd %r14d, %xmm6
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: shlq $55, %r15
|
|
|
|
; SSE2-NEXT: sarq $63, %r15
|
|
|
|
; SSE2-NEXT: movd %r15d, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: shlq $63, %r12
|
|
|
|
; SSE2-NEXT: sarq $63, %r12
|
|
|
|
; SSE2-NEXT: movd %r12d, %xmm1
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
|
|
|
|
; SSE2-NEXT: shlq $50, %r13
|
|
|
|
; SSE2-NEXT: sarq $63, %r13
|
|
|
|
; SSE2-NEXT: movd %r13d, %xmm2
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: shlq $58, %rbx
|
|
|
|
; SSE2-NEXT: sarq $63, %rbx
|
|
|
|
; SSE2-NEXT: movd %ebx, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
|
|
|
|
; SSE2-NEXT: shlq $54, %rax
|
|
|
|
; SSE2-NEXT: sarq $63, %rax
|
|
|
|
; SSE2-NEXT: movd %eax, %xmm5
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: shlq $62, %rcx
|
|
|
|
; SSE2-NEXT: sarq $63, %rcx
|
|
|
|
; SSE2-NEXT: movd %ecx, %xmm4
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: shlq $52, %rsi
|
|
|
|
; SSE2-NEXT: sarq $63, %rsi
|
|
|
|
; SSE2-NEXT: movd %esi, %xmm2
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
|
|
|
|
; SSE2-NEXT: shlq $60, %rdi
|
|
|
|
; SSE2-NEXT: sarq $63, %rdi
|
|
|
|
; SSE2-NEXT: movd %edi, %xmm3
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: shrq $15, %rbp
|
|
|
|
; SSE2-NEXT: movd %ebp, %xmm2
|
|
|
|
; SSE2-NEXT: shrq $7, %rdx
|
|
|
|
; SSE2-NEXT: movd %edx, %xmm5
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
|
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; SSE2-NEXT: popq %rbx
|
|
|
|
; SSE2-NEXT: popq %r12
|
|
|
|
; SSE2-NEXT: popq %r13
|
|
|
|
; SSE2-NEXT: popq %r14
|
|
|
|
; SSE2-NEXT: popq %r15
|
|
|
|
; SSE2-NEXT: popq %rbp
|
2015-10-30 06:19:21 +08:00
|
|
|
; SSE2-NEXT: retq
|
|
|
|
;
|
|
|
|
; SSSE3-LABEL: load_sext_32i1_to_32i8:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: pushq %rbp
; SSSE3-NEXT: pushq %r15
; SSSE3-NEXT: pushq %r14
; SSSE3-NEXT: pushq %r13
; SSSE3-NEXT: pushq %r12
; SSSE3-NEXT: pushq %rbx
; SSSE3-NEXT: movswq (%rdi), %rbx
; SSSE3-NEXT: movq %rbx, %r10
; SSSE3-NEXT: movq %rbx, %r8
; SSSE3-NEXT: movq %rbx, %r9
; SSSE3-NEXT: movq %rbx, %r11
; SSSE3-NEXT: movq %rbx, %r14
; SSSE3-NEXT: movq %rbx, %r15
; SSSE3-NEXT: movq %rbx, %r12
; SSSE3-NEXT: movq %rbx, %r13
; SSSE3-NEXT: movq %rbx, %rdx
; SSSE3-NEXT: movq %rbx, %rsi
; SSSE3-NEXT: movq %rbx, %rcx
; SSSE3-NEXT: movq %rbx, %rbp
; SSSE3-NEXT: movq %rbx, %rax
; SSSE3-NEXT: shlq $49, %rax
; SSSE3-NEXT: sarq $63, %rax
; SSSE3-NEXT: movd %eax, %xmm0
; SSSE3-NEXT: movq %rbx, %rax
; SSSE3-NEXT: shlq $57, %r10
; SSSE3-NEXT: sarq $63, %r10
; SSSE3-NEXT: movd %r10d, %xmm15
; SSSE3-NEXT: movq %rbx, %r10
; SSSE3-NEXT: movsbq %bl, %rbx
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm0[0],xmm15[1],xmm0[1],xmm15[2],xmm0[2],xmm15[3],xmm0[3],xmm15[4],xmm0[4],xmm15[5],xmm0[5],xmm15[6],xmm0[6],xmm15[7],xmm0[7]
; SSSE3-NEXT: shlq $53, %r8
; SSSE3-NEXT: sarq $63, %r8
; SSSE3-NEXT: movd %r8d, %xmm8
; SSSE3-NEXT: shlq $61, %r9
; SSSE3-NEXT: sarq $63, %r9
; SSSE3-NEXT: movd %r9d, %xmm2
; SSSE3-NEXT: shlq $51, %r11
; SSSE3-NEXT: sarq $63, %r11
; SSSE3-NEXT: movd %r11d, %xmm9
; SSSE3-NEXT: shlq $59, %r14
; SSSE3-NEXT: sarq $63, %r14
; SSSE3-NEXT: movd %r14d, %xmm5
; SSSE3-NEXT: shlq $55, %r15
; SSSE3-NEXT: sarq $63, %r15
; SSSE3-NEXT: movd %r15d, %xmm10
; SSSE3-NEXT: shlq $63, %r12
; SSSE3-NEXT: sarq $63, %r12
; SSSE3-NEXT: movd %r12d, %xmm0
; SSSE3-NEXT: shlq $50, %r13
; SSSE3-NEXT: sarq $63, %r13
; SSSE3-NEXT: movd %r13d, %xmm11
; SSSE3-NEXT: shlq $58, %rdx
; SSSE3-NEXT: sarq $63, %rdx
; SSSE3-NEXT: movd %edx, %xmm4
; SSSE3-NEXT: shlq $54, %rsi
; SSSE3-NEXT: sarq $63, %rsi
; SSSE3-NEXT: movd %esi, %xmm12
; SSSE3-NEXT: shlq $62, %rcx
; SSSE3-NEXT: sarq $63, %rcx
; SSSE3-NEXT: movd %ecx, %xmm6
; SSSE3-NEXT: shlq $52, %rbp
; SSSE3-NEXT: sarq $63, %rbp
; SSSE3-NEXT: movd %ebp, %xmm13
; SSSE3-NEXT: shlq $60, %rax
; SSSE3-NEXT: sarq $63, %rax
; SSSE3-NEXT: movd %eax, %xmm7
; SSSE3-NEXT: shrq $15, %r10
; SSSE3-NEXT: movd %r10d, %xmm14
; SSSE3-NEXT: shrq $7, %rbx
; SSSE3-NEXT: movd %ebx, %xmm3
; SSSE3-NEXT: movswq 2(%rdi), %rdx
; SSSE3-NEXT: movq %rdx, %r8
; SSSE3-NEXT: movq %rdx, %r9
; SSSE3-NEXT: movq %rdx, %r10
; SSSE3-NEXT: movq %rdx, %r11
; SSSE3-NEXT: movq %rdx, %r14
; SSSE3-NEXT: movq %rdx, %r15
; SSSE3-NEXT: movq %rdx, %r12
; SSSE3-NEXT: movq %rdx, %r13
; SSSE3-NEXT: movq %rdx, %rbx
; SSSE3-NEXT: movq %rdx, %rax
; SSSE3-NEXT: movq %rdx, %rcx
; SSSE3-NEXT: movq %rdx, %rsi
; SSSE3-NEXT: movq %rdx, %rdi
; SSSE3-NEXT: movq %rdx, %rbp
; SSSE3-NEXT: shlq $49, %rbp
; SSSE3-NEXT: sarq $63, %rbp
; SSSE3-NEXT: movd %ebp, %xmm1
; SSSE3-NEXT: movq %rdx, %rbp
; SSSE3-NEXT: movsbq %dl, %rdx
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm8[0],xmm2[1],xmm8[1],xmm2[2],xmm8[2],xmm2[3],xmm8[3],xmm2[4],xmm8[4],xmm2[5],xmm8[5],xmm2[6],xmm8[6],xmm2[7],xmm8[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm15[0],xmm2[1],xmm15[1],xmm2[2],xmm15[2],xmm2[3],xmm15[3],xmm2[4],xmm15[4],xmm2[5],xmm15[5],xmm2[6],xmm15[6],xmm2[7],xmm15[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm9[0],xmm5[1],xmm9[1],xmm5[2],xmm9[2],xmm5[3],xmm9[3],xmm5[4],xmm9[4],xmm5[5],xmm9[5],xmm5[6],xmm9[6],xmm5[7],xmm9[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm5[0],xmm0[1],xmm5[1],xmm0[2],xmm5[2],xmm0[3],xmm5[3],xmm0[4],xmm5[4],xmm0[5],xmm5[5],xmm0[6],xmm5[6],xmm0[7],xmm5[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm11[0],xmm4[1],xmm11[1],xmm4[2],xmm11[2],xmm4[3],xmm11[3],xmm4[4],xmm11[4],xmm4[5],xmm11[5],xmm4[6],xmm11[6],xmm4[7],xmm11[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm4[0],xmm6[1],xmm4[1],xmm6[2],xmm4[2],xmm6[3],xmm4[3],xmm6[4],xmm4[4],xmm6[5],xmm4[5],xmm6[6],xmm4[6],xmm6[7],xmm4[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
; SSSE3-NEXT: shlq $57, %r8
; SSSE3-NEXT: sarq $63, %r8
; SSSE3-NEXT: movd %r8d, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm3[0],xmm7[1],xmm3[1],xmm7[2],xmm3[2],xmm7[3],xmm3[3],xmm7[4],xmm3[4],xmm7[5],xmm3[5],xmm7[6],xmm3[6],xmm7[7],xmm3[7]
; SSSE3-NEXT: shlq $53, %r9
; SSSE3-NEXT: sarq $63, %r9
; SSSE3-NEXT: movd %r9d, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7]
; SSSE3-NEXT: shlq $61, %r10
; SSSE3-NEXT: sarq $63, %r10
; SSSE3-NEXT: movd %r10d, %xmm4
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1],xmm0[2],xmm6[2],xmm0[3],xmm6[3],xmm0[4],xmm6[4],xmm0[5],xmm6[5],xmm0[6],xmm6[6],xmm0[7],xmm6[7]
; SSSE3-NEXT: shlq $51, %r11
; SSSE3-NEXT: sarq $63, %r11
; SSSE3-NEXT: movd %r11d, %xmm5
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSSE3-NEXT: shlq $59, %r14
; SSSE3-NEXT: sarq $63, %r14
; SSSE3-NEXT: movd %r14d, %xmm6
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; SSSE3-NEXT: shlq $55, %r15
; SSSE3-NEXT: sarq $63, %r15
; SSSE3-NEXT: movd %r15d, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1],xmm4[2],xmm2[2],xmm4[3],xmm2[3],xmm4[4],xmm2[4],xmm4[5],xmm2[5],xmm4[6],xmm2[6],xmm4[7],xmm2[7]
; SSSE3-NEXT: shlq $63, %r12
; SSSE3-NEXT: sarq $63, %r12
; SSSE3-NEXT: movd %r12d, %xmm1
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm5[0],xmm6[1],xmm5[1],xmm6[2],xmm5[2],xmm6[3],xmm5[3],xmm6[4],xmm5[4],xmm6[5],xmm5[5],xmm6[6],xmm5[6],xmm6[7],xmm5[7]
; SSSE3-NEXT: shlq $50, %r13
; SSSE3-NEXT: sarq $63, %r13
; SSSE3-NEXT: movd %r13d, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSSE3-NEXT: shlq $58, %rbx
; SSSE3-NEXT: sarq $63, %rbx
; SSSE3-NEXT: movd %ebx, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3],xmm1[4],xmm6[4],xmm1[5],xmm6[5],xmm1[6],xmm6[6],xmm1[7],xmm6[7]
; SSSE3-NEXT: shlq $54, %rax
; SSSE3-NEXT: sarq $63, %rax
; SSSE3-NEXT: movd %eax, %xmm5
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: shlq $62, %rcx
; SSSE3-NEXT: sarq $63, %rcx
; SSSE3-NEXT: movd %ecx, %xmm4
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSSE3-NEXT: shlq $52, %rsi
; SSSE3-NEXT: sarq $63, %rsi
; SSSE3-NEXT: movd %esi, %xmm2
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1],xmm4[2],xmm5[2],xmm4[3],xmm5[3],xmm4[4],xmm5[4],xmm4[5],xmm5[5],xmm4[6],xmm5[6],xmm4[7],xmm5[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; SSSE3-NEXT: shlq $60, %rdi
; SSSE3-NEXT: sarq $63, %rdi
; SSSE3-NEXT: movd %edi, %xmm3
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7]
; SSSE3-NEXT: shrq $15, %rbp
; SSSE3-NEXT: movd %ebp, %xmm2
; SSSE3-NEXT: shrq $7, %rdx
; SSSE3-NEXT: movd %edx, %xmm5
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm5 = xmm5[0],xmm2[0],xmm5[1],xmm2[1],xmm5[2],xmm2[2],xmm5[3],xmm2[3],xmm5[4],xmm2[4],xmm5[5],xmm2[5],xmm5[6],xmm2[6],xmm5[7],xmm2[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3],xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7]
; SSSE3-NEXT: popq %rbx
; SSSE3-NEXT: popq %r12
; SSSE3-NEXT: popq %r13
; SSSE3-NEXT: popq %r14
; SSSE3-NEXT: popq %r15
; SSSE3-NEXT: popq %rbp
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_32i1_to_32i8:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: movswq (%rdi), %rax
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $62, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: movq %rax, %rdx
; SSE41-NEXT: shlq $63, %rdx
; SSE41-NEXT: sarq $63, %rdx
; SSE41-NEXT: movd %edx, %xmm0
; SSE41-NEXT: pinsrb $1, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $61, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $2, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $60, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $3, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $59, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $4, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $58, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $5, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $57, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $6, %ecx, %xmm0
; SSE41-NEXT: movsbq %al, %rcx
; SSE41-NEXT: shrq $7, %rcx
; SSE41-NEXT: pinsrb $7, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $55, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $8, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $54, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $9, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $53, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $10, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $52, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $11, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $51, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $12, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $50, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $13, %ecx, %xmm0
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $49, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $14, %ecx, %xmm0
; SSE41-NEXT: shrq $15, %rax
; SSE41-NEXT: pinsrb $15, %eax, %xmm0
; SSE41-NEXT: movswq 2(%rdi), %rax
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $62, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: movq %rax, %rdx
; SSE41-NEXT: shlq $63, %rdx
; SSE41-NEXT: sarq $63, %rdx
; SSE41-NEXT: movd %edx, %xmm1
; SSE41-NEXT: pinsrb $1, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $61, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $2, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $60, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $3, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $59, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $4, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $58, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $5, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $57, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $6, %ecx, %xmm1
; SSE41-NEXT: movsbq %al, %rcx
; SSE41-NEXT: shrq $7, %rcx
; SSE41-NEXT: pinsrb $7, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $55, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $8, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $54, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $9, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $53, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $10, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $52, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $11, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $51, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $12, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $50, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $13, %ecx, %xmm1
; SSE41-NEXT: movq %rax, %rcx
; SSE41-NEXT: shlq $49, %rcx
; SSE41-NEXT: sarq $63, %rcx
; SSE41-NEXT: pinsrb $14, %ecx, %xmm1
; SSE41-NEXT: shrq $15, %rax
; SSE41-NEXT: pinsrb $15, %eax, %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_32i1_to_32i8:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: pushq %rbp
; AVX1-NEXT: pushq %r15
; AVX1-NEXT: pushq %r14
; AVX1-NEXT: pushq %r13
; AVX1-NEXT: pushq %r12
; AVX1-NEXT: pushq %rbx
; AVX1-NEXT: movslq (%rdi), %rax
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $47, %rcx
; AVX1-NEXT: sarq $63, %rcx
; AVX1-NEXT: vmovd %ecx, %xmm0
; AVX1-NEXT: movq %rax, %r8
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: movq %rax, %r13
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: movq %rax, %r10
; AVX1-NEXT: movq %rax, %r11
; AVX1-NEXT: movq %rax, %r9
; AVX1-NEXT: movq %rax, %rbx
; AVX1-NEXT: movq %rax, %r14
; AVX1-NEXT: movq %rax, %r15
; AVX1-NEXT: movq %rax, %r12
; AVX1-NEXT: movq %rax, %rbp
; AVX1-NEXT: shlq $46, %rbp
; AVX1-NEXT: sarq $63, %rbp
; AVX1-NEXT: vpinsrb $1, %ebp, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rbp
; AVX1-NEXT: shlq $45, %r8
; AVX1-NEXT: sarq $63, %r8
; AVX1-NEXT: vpinsrb $2, %r8d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r8
; AVX1-NEXT: shlq $44, %rdx
; AVX1-NEXT: sarq $63, %rdx
; AVX1-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: shlq $43, %rcx
; AVX1-NEXT: sarq $63, %rcx
; AVX1-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rcx
; AVX1-NEXT: shlq $42, %rdi
; AVX1-NEXT: sarq $63, %rdi
; AVX1-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rdi
; AVX1-NEXT: shlq $41, %r13
; AVX1-NEXT: sarq $63, %r13
; AVX1-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r13
; AVX1-NEXT: shlq $40, %rsi
; AVX1-NEXT: sarq $63, %rsi
; AVX1-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rsi
; AVX1-NEXT: shlq $39, %r10
; AVX1-NEXT: sarq $63, %r10
; AVX1-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r10
; AVX1-NEXT: shlq $38, %r11
; AVX1-NEXT: sarq $63, %r11
; AVX1-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
; AVX1-NEXT: movsbq %al, %r11
; AVX1-NEXT: shlq $37, %r9
; AVX1-NEXT: sarq $63, %r9
; AVX1-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r9
; AVX1-NEXT: shlq $36, %rbx
; AVX1-NEXT: sarq $63, %rbx
; AVX1-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rbx
; AVX1-NEXT: shlq $35, %r14
; AVX1-NEXT: sarq $63, %r14
; AVX1-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r14
; AVX1-NEXT: shlq $34, %r15
; AVX1-NEXT: sarq $63, %r15
; AVX1-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r15
; AVX1-NEXT: shlq $33, %r12
; AVX1-NEXT: sarq $63, %r12
; AVX1-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %r12
; AVX1-NEXT: shrq $31, %rbp
; AVX1-NEXT: vpinsrb $15, %ebp, %xmm0, %xmm0
; AVX1-NEXT: movq %rax, %rbp
; AVX1-NEXT: shlq $63, %rdx
; AVX1-NEXT: sarq $63, %rdx
; AVX1-NEXT: vmovd %edx, %xmm1
; AVX1-NEXT: movq %rax, %rdx
; AVX1-NEXT: movswq %ax, %rax
; AVX1-NEXT: shlq $62, %r8
; AVX1-NEXT: sarq $63, %r8
; AVX1-NEXT: vpinsrb $1, %r8d, %xmm1, %xmm1
; AVX1-NEXT: shlq $61, %rcx
; AVX1-NEXT: sarq $63, %rcx
; AVX1-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
; AVX1-NEXT: shlq $60, %rdi
; AVX1-NEXT: sarq $63, %rdi
; AVX1-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
; AVX1-NEXT: shlq $59, %r13
; AVX1-NEXT: sarq $63, %r13
; AVX1-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
; AVX1-NEXT: shlq $58, %rsi
; AVX1-NEXT: sarq $63, %rsi
; AVX1-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
; AVX1-NEXT: shlq $57, %r10
; AVX1-NEXT: sarq $63, %r10
; AVX1-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
; AVX1-NEXT: shrq $7, %r11
; AVX1-NEXT: vpinsrb $7, %r11d, %xmm1, %xmm1
; AVX1-NEXT: shlq $55, %r9
; AVX1-NEXT: sarq $63, %r9
; AVX1-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
; AVX1-NEXT: shlq $54, %rbx
; AVX1-NEXT: sarq $63, %rbx
; AVX1-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
; AVX1-NEXT: shlq $53, %r14
; AVX1-NEXT: sarq $63, %r14
; AVX1-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
; AVX1-NEXT: shlq $52, %r15
; AVX1-NEXT: sarq $63, %r15
; AVX1-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
; AVX1-NEXT: shlq $51, %r12
; AVX1-NEXT: sarq $63, %r12
; AVX1-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
; AVX1-NEXT: shlq $50, %rbp
; AVX1-NEXT: sarq $63, %rbp
; AVX1-NEXT: vpinsrb $13, %ebp, %xmm1, %xmm1
; AVX1-NEXT: shlq $49, %rdx
; AVX1-NEXT: sarq $63, %rdx
; AVX1-NEXT: vpinsrb $14, %edx, %xmm1, %xmm1
; AVX1-NEXT: shrq $15, %rax
; AVX1-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: popq %rbx
; AVX1-NEXT: popq %r12
; AVX1-NEXT: popq %r13
; AVX1-NEXT: popq %r14
; AVX1-NEXT: popq %r15
; AVX1-NEXT: popq %rbp
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_32i1_to_32i8:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: pushq %rbp
; AVX2-NEXT: pushq %r15
; AVX2-NEXT: pushq %r14
; AVX2-NEXT: pushq %r13
; AVX2-NEXT: pushq %r12
; AVX2-NEXT: pushq %rbx
; AVX2-NEXT: movslq (%rdi), %rax
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $47, %rcx
; AVX2-NEXT: sarq $63, %rcx
; AVX2-NEXT: vmovd %ecx, %xmm0
; AVX2-NEXT: movq %rax, %r8
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: movq %rax, %r13
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: movq %rax, %r10
; AVX2-NEXT: movq %rax, %r11
; AVX2-NEXT: movq %rax, %r9
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: movq %rax, %r14
; AVX2-NEXT: movq %rax, %r15
; AVX2-NEXT: movq %rax, %r12
; AVX2-NEXT: movq %rax, %rbp
; AVX2-NEXT: shlq $46, %rbp
; AVX2-NEXT: sarq $63, %rbp
; AVX2-NEXT: vpinsrb $1, %ebp, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rbp
; AVX2-NEXT: shlq $45, %r8
; AVX2-NEXT: sarq $63, %r8
; AVX2-NEXT: vpinsrb $2, %r8d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r8
; AVX2-NEXT: shlq $44, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: vpinsrb $3, %edx, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: shlq $43, %rcx
; AVX2-NEXT: sarq $63, %rcx
; AVX2-NEXT: vpinsrb $4, %ecx, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rcx
; AVX2-NEXT: shlq $42, %rdi
; AVX2-NEXT: sarq $63, %rdi
; AVX2-NEXT: vpinsrb $5, %edi, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rdi
; AVX2-NEXT: shlq $41, %r13
; AVX2-NEXT: sarq $63, %r13
; AVX2-NEXT: vpinsrb $6, %r13d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r13
; AVX2-NEXT: shlq $40, %rsi
; AVX2-NEXT: sarq $63, %rsi
; AVX2-NEXT: vpinsrb $7, %esi, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rsi
; AVX2-NEXT: shlq $39, %r10
; AVX2-NEXT: sarq $63, %r10
; AVX2-NEXT: vpinsrb $8, %r10d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r10
; AVX2-NEXT: shlq $38, %r11
; AVX2-NEXT: sarq $63, %r11
; AVX2-NEXT: vpinsrb $9, %r11d, %xmm0, %xmm0
; AVX2-NEXT: movsbq %al, %r11
; AVX2-NEXT: shlq $37, %r9
; AVX2-NEXT: sarq $63, %r9
; AVX2-NEXT: vpinsrb $10, %r9d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r9
; AVX2-NEXT: shlq $36, %rbx
; AVX2-NEXT: sarq $63, %rbx
; AVX2-NEXT: vpinsrb $11, %ebx, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rbx
; AVX2-NEXT: shlq $35, %r14
; AVX2-NEXT: sarq $63, %r14
; AVX2-NEXT: vpinsrb $12, %r14d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r14
; AVX2-NEXT: shlq $34, %r15
; AVX2-NEXT: sarq $63, %r15
; AVX2-NEXT: vpinsrb $13, %r15d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r15
; AVX2-NEXT: shlq $33, %r12
; AVX2-NEXT: sarq $63, %r12
; AVX2-NEXT: vpinsrb $14, %r12d, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %r12
; AVX2-NEXT: shrq $31, %rbp
; AVX2-NEXT: vpinsrb $15, %ebp, %xmm0, %xmm0
; AVX2-NEXT: movq %rax, %rbp
; AVX2-NEXT: shlq $63, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: vmovd %edx, %xmm1
; AVX2-NEXT: movq %rax, %rdx
; AVX2-NEXT: movswq %ax, %rax
; AVX2-NEXT: shlq $62, %r8
; AVX2-NEXT: sarq $63, %r8
; AVX2-NEXT: vpinsrb $1, %r8d, %xmm1, %xmm1
; AVX2-NEXT: shlq $61, %rcx
; AVX2-NEXT: sarq $63, %rcx
; AVX2-NEXT: vpinsrb $2, %ecx, %xmm1, %xmm1
; AVX2-NEXT: shlq $60, %rdi
; AVX2-NEXT: sarq $63, %rdi
; AVX2-NEXT: vpinsrb $3, %edi, %xmm1, %xmm1
; AVX2-NEXT: shlq $59, %r13
; AVX2-NEXT: sarq $63, %r13
; AVX2-NEXT: vpinsrb $4, %r13d, %xmm1, %xmm1
; AVX2-NEXT: shlq $58, %rsi
; AVX2-NEXT: sarq $63, %rsi
; AVX2-NEXT: vpinsrb $5, %esi, %xmm1, %xmm1
; AVX2-NEXT: shlq $57, %r10
; AVX2-NEXT: sarq $63, %r10
; AVX2-NEXT: vpinsrb $6, %r10d, %xmm1, %xmm1
; AVX2-NEXT: shrq $7, %r11
; AVX2-NEXT: vpinsrb $7, %r11d, %xmm1, %xmm1
; AVX2-NEXT: shlq $55, %r9
; AVX2-NEXT: sarq $63, %r9
; AVX2-NEXT: vpinsrb $8, %r9d, %xmm1, %xmm1
; AVX2-NEXT: shlq $54, %rbx
; AVX2-NEXT: sarq $63, %rbx
; AVX2-NEXT: vpinsrb $9, %ebx, %xmm1, %xmm1
; AVX2-NEXT: shlq $53, %r14
; AVX2-NEXT: sarq $63, %r14
; AVX2-NEXT: vpinsrb $10, %r14d, %xmm1, %xmm1
; AVX2-NEXT: shlq $52, %r15
; AVX2-NEXT: sarq $63, %r15
; AVX2-NEXT: vpinsrb $11, %r15d, %xmm1, %xmm1
; AVX2-NEXT: shlq $51, %r12
; AVX2-NEXT: sarq $63, %r12
; AVX2-NEXT: vpinsrb $12, %r12d, %xmm1, %xmm1
; AVX2-NEXT: shlq $50, %rbp
; AVX2-NEXT: sarq $63, %rbp
; AVX2-NEXT: vpinsrb $13, %ebp, %xmm1, %xmm1
; AVX2-NEXT: shlq $49, %rdx
; AVX2-NEXT: sarq $63, %rdx
; AVX2-NEXT: vpinsrb $14, %edx, %xmm1, %xmm1
; AVX2-NEXT: shrq $15, %rax
; AVX2-NEXT: vpinsrb $15, %eax, %xmm1, %xmm1
; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
; AVX2-NEXT: popq %rbx
; AVX2-NEXT: popq %r12
; AVX2-NEXT: popq %r13
; AVX2-NEXT: popq %r14
; AVX2-NEXT: popq %r15
; AVX2-NEXT: popq %rbp
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_32i1_to_32i8:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: pushl %esi
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: movswl (%eax), %ecx
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $30, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: movl %ecx, %esi
; X32-SSE41-NEXT: shll $31, %esi
; X32-SSE41-NEXT: sarl $31, %esi
; X32-SSE41-NEXT: movd %esi, %xmm0
; X32-SSE41-NEXT: pinsrb $1, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $29, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $2, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $28, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $3, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $27, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $4, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $26, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $5, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $25, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $6, %edx, %xmm0
; X32-SSE41-NEXT: movsbl %cl, %edx
; X32-SSE41-NEXT: shrl $7, %edx
; X32-SSE41-NEXT: pinsrb $7, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $23, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $8, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $22, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $9, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $21, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $10, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $20, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $11, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $19, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $12, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $18, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $13, %edx, %xmm0
; X32-SSE41-NEXT: movl %ecx, %edx
; X32-SSE41-NEXT: shll $17, %edx
; X32-SSE41-NEXT: sarl $31, %edx
; X32-SSE41-NEXT: pinsrb $14, %edx, %xmm0
; X32-SSE41-NEXT: shrl $15, %ecx
; X32-SSE41-NEXT: pinsrb $15, %ecx, %xmm0
; X32-SSE41-NEXT: movswl 2(%eax), %eax
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $30, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: movl %eax, %edx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $31, %edx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %edx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: movd %edx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: pinsrb $1, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $29, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $2, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $28, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $3, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $27, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $4, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $26, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $5, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $25, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $6, %ecx, %xmm1
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; X32-SSE41-NEXT: movsbl %al, %ecx
|
|
|
|
; X32-SSE41-NEXT: shrl $7, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $7, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $23, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $8, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $22, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $9, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $21, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $10, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $20, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $11, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $19, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $12, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $18, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $13, %ecx, %xmm1
|
|
|
|
; X32-SSE41-NEXT: movl %eax, %ecx
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: shll $17, %ecx
|
|
|
|
; X32-SSE41-NEXT: sarl $31, %ecx
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $14, %ecx, %xmm1
|
[X86] Improve shift combining
This folds (ashr (shl a, [56,48,32,24,16]), SarConst)
into (shl, (sext (a), [56,48,32,24,16] - SarConst))
or into (lshr, (sext (a), SarConst - [56,48,32,24,16]))
depending on sign of (SarConst - [56,48,32,24,16])
sexts in X86 are MOVs.
The MOVs have the same code size as above SHIFTs (only SHIFT by 1 has lower code size).
However the MOVs have 2 advantages to SHIFTs on x86:
1. MOVs can write to a register that differs from source.
2. MOVs accept memory operands.
This fixes PR24373.
Patch by: evgeny.v.stupachenko@intel.com
Differential Revision: http://reviews.llvm.org/D13161
llvm-svn: 255761
2015-12-16 19:22:37 +08:00
|
|
|
; X32-SSE41-NEXT: shrl $15, %eax
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: pinsrb $15, %eax, %xmm1
|
2015-10-30 06:19:21 +08:00
|
|
|
; X32-SSE41-NEXT: popl %esi
|
2015-09-12 23:36:41 +08:00
|
|
|
; X32-SSE41-NEXT: retl
|
|
|
|
entry:
|
2015-10-30 06:19:21 +08:00
|
|
|
%X = load <32 x i1>, <32 x i1>* %ptr
|
|
|
|
%Y = sext <32 x i1> %X to <32 x i8>
|
|
|
|
ret <32 x i8> %Y
|
2015-09-12 23:36:41 +08:00
|
|
|
}
|
|
|
|
|
2015-07-25 22:07:20 +08:00
|
|
|
define <16 x i16> @load_sext_16i8_to_16i16(<16 x i8> *%ptr) {
; SSE2-LABEL: load_sext_16i8_to_16i16:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_16i8_to_16i16:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_16i8_to_16i16:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxbw (%rdi), %xmm0
; SSE41-NEXT: pmovsxbw 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_16i8_to_16i16:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxbw (%rdi), %xmm0
; AVX1-NEXT: vpmovsxbw 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_16i8_to_16i16:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxbw (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_16i8_to_16i16:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxbw (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxbw 8(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
  %X = load <16 x i8>, <16 x i8>* %ptr
  %Y = sext <16 x i8> %X to <16 x i16>
  ret <16 x i16> %Y
}

define <2 x i64> @load_sext_2i16_to_2i64(<2 x i16> *%ptr) {
; SSE2-LABEL: load_sext_2i16_to_2i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_2i16_to_2i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_2i16_to_2i64:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_2i16_to_2i64:
; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxwq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i16_to_2i64:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
  %X = load <2 x i16>, <2 x i16>* %ptr
  %Y = sext <2 x i16> %X to <2 x i64>
  ret <2 x i64> %Y
}

define <4 x i32> @load_sext_4i16_to_4i32(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_4i16_to_4i32:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i32:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_4i16_to_4i32:
; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i16_to_4i32:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
  %X = load <4 x i16>, <4 x i16>* %ptr
  %Y = sext <4 x i16> %X to <4 x i32>
  ret <4 x i32> %Y
}

define <4 x i64> @load_sext_4i16_to_4i64(<4 x i16> *%ptr) {
; SSE2-LABEL: load_sext_4i16_to_4i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movswq 2(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: movswq (%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm0
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: movswq 6(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm2
; SSE2-NEXT: movswq 4(%rdi), %rax
; SSE2-NEXT: movd %rax, %xmm1
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i16_to_4i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movswq 2(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: movswq (%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: movswq 6(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm2
; SSSE3-NEXT: movswq 4(%rdi), %rax
; SSSE3-NEXT: movd %rax, %xmm1
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i16_to_4i64:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxwq (%rdi), %xmm0
; SSE41-NEXT: pmovsxwq 4(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i16_to_4i64:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i16_to_4i64:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxwq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i16_to_4i64:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwq (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxwq 4(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
  %X = load <4 x i16>, <4 x i16>* %ptr
  %Y = sext <4 x i16> %X to <4 x i64>
  ret <4 x i64> %Y
}

define <8 x i32> @load_sext_8i16_to_8i32(<8 x i16> *%ptr) {
; SSE2-LABEL: load_sext_8i16_to_8i32:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm0
; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE2-NEXT: psrad $16, %xmm1
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_8i16_to_8i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm0
; SSSE3-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
; SSSE3-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSSE3-NEXT: psrad $16, %xmm1
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_8i16_to_8i32:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxwd (%rdi), %xmm0
; SSE41-NEXT: pmovsxwd 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_8i16_to_8i32:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxwd (%rdi), %xmm0
; AVX1-NEXT: vpmovsxwd 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_8i16_to_8i32:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxwd (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_8i16_to_8i32:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxwd (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxwd 8(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
  %X = load <8 x i16>, <8 x i16>* %ptr
  %Y = sext <8 x i16> %X to <8 x i32>
  ret <8 x i32> %Y
}

define <2 x i64> @load_sext_2i32_to_2i64(<2 x i32> *%ptr) {
; SSE2-LABEL: load_sext_2i32_to_2i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT: movdqa %xmm0, %xmm1
; SSE2-NEXT: psrad $31, %xmm1
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_2i32_to_2i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSSE3-NEXT: movdqa %xmm0, %xmm1
; SSSE3-NEXT: psrad $31, %xmm1
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_2i32_to_2i64:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: retq
;
; AVX-LABEL: load_sext_2i32_to_2i64:
; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_2i32_to_2i64:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
; X32-SSE41-NEXT: retl
entry:
  %X = load <2 x i32>, <2 x i32>* %ptr
  %Y = sext <2 x i32> %X to <2 x i64>
  ret <2 x i64> %Y
}

define <4 x i64> @load_sext_4i32_to_4i64(<4 x i32> *%ptr) {
; SSE2-LABEL: load_sext_4i32_to_4i64:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: movdqa (%rdi), %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: load_sext_4i32_to_4i64:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: movdqa (%rdi), %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: load_sext_4i32_to_4i64:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxdq (%rdi), %xmm0
; SSE41-NEXT: pmovsxdq 8(%rdi), %xmm1
; SSE41-NEXT: retq
;
; AVX1-LABEL: load_sext_4i32_to_4i64:
; AVX1: # BB#0: # %entry
; AVX1-NEXT: vpmovsxdq (%rdi), %xmm0
; AVX1-NEXT: vpmovsxdq 8(%rdi), %xmm1
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: load_sext_4i32_to_4i64:
; AVX2: # BB#0: # %entry
; AVX2-NEXT: vpmovsxdq (%rdi), %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: load_sext_4i32_to_4i64:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
; X32-SSE41-NEXT: pmovsxdq (%eax), %xmm0
; X32-SSE41-NEXT: pmovsxdq 8(%eax), %xmm1
; X32-SSE41-NEXT: retl
entry:
  %X = load <4 x i32>, <4 x i32>* %ptr
  %Y = sext <4 x i32> %X to <4 x i64>
  ret <4 x i64> %Y
}

define i32 @sext_2i8_to_i32(<16 x i8> %A) nounwind uwtable readnone ssp {
; SSE2-LABEL: sext_2i8_to_i32:
; SSE2: # BB#0: # %entry
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE2-NEXT: psraw $8, %xmm0
; SSE2-NEXT: movd %xmm0, %eax
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_2i8_to_i32:
; SSSE3: # BB#0: # %entry
; SSSE3-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSSE3-NEXT: psraw $8, %xmm0
; SSSE3-NEXT: movd %xmm0, %eax
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_2i8_to_i32:
; SSE41: # BB#0: # %entry
; SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; SSE41-NEXT: movd %xmm0, %eax
; SSE41-NEXT: retq
;
; AVX-LABEL: sext_2i8_to_i32:
; AVX: # BB#0: # %entry
; AVX-NEXT: vpmovsxbw %xmm0, %xmm0
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: retq
;
; X32-SSE41-LABEL: sext_2i8_to_i32:
; X32-SSE41: # BB#0: # %entry
; X32-SSE41-NEXT: pushl %eax
; X32-SSE41-NEXT: .Ltmp0:
; X32-SSE41-NEXT: .cfi_def_cfa_offset 8
; X32-SSE41-NEXT: pmovsxbw %xmm0, %xmm0
; X32-SSE41-NEXT: movd %xmm0, %eax
; X32-SSE41-NEXT: popl %ecx
; X32-SSE41-NEXT: retl
entry:
  %Shuf = shufflevector <16 x i8> %A, <16 x i8> undef, <2 x i32> <i32 0, i32 1>
  %Ex = sext <2 x i8> %Shuf to <2 x i16>
  %Bc = bitcast <2 x i16> %Ex to i32
  ret i32 %Bc
}

define <4 x i64> @sext_4i1_to_4i64(<4 x i1> %mask) {
; SSE2-LABEL: sext_4i1_to_4i64:
; SSE2: # BB#0:
; SSE2-NEXT: pslld $31, %xmm0
; SSE2-NEXT: psrad $31, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i1_to_4i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: pslld $31, %xmm0
; SSSE3-NEXT: psrad $31, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i1_to_4i64:
; SSE41: # BB#0:
; SSE41-NEXT: pslld $31, %xmm0
; SSE41-NEXT: psrad $31, %xmm0
; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i1_to_4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vpslld $31, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i1_to_4i64:
; AVX2: # BB#0:
; AVX2-NEXT: vpslld $31, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $31, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i1_to_4i64:
; X32-SSE41: # BB#0:
; X32-SSE41-NEXT: pslld $31, %xmm0
; X32-SSE41-NEXT: psrad $31, %xmm0
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm1
; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
; X32-SSE41-NEXT: retl
  %extmask = sext <4 x i1> %mask to <4 x i64>
  ret <4 x i64> %extmask
}

define <4 x i64> @sext_4i8_to_4i64(<4 x i8> %mask) {
; SSE2-LABEL: sext_4i8_to_4i64:
; SSE2: # BB#0:
; SSE2-NEXT: pslld $24, %xmm0
; SSE2-NEXT: psrad $24, %xmm0
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSE2-NEXT: movdqa %xmm1, %xmm2
; SSE2-NEXT: psrad $31, %xmm2
; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT: retq
;
; SSSE3-LABEL: sext_4i8_to_4i64:
; SSSE3: # BB#0:
; SSSE3-NEXT: pslld $24, %xmm0
; SSSE3-NEXT: psrad $24, %xmm0
; SSSE3-NEXT: movdqa %xmm0, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSSE3-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
; SSSE3-NEXT: movdqa %xmm1, %xmm2
; SSSE3-NEXT: psrad $31, %xmm2
; SSSE3-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSSE3-NEXT: retq
;
; SSE41-LABEL: sext_4i8_to_4i64:
; SSE41: # BB#0:
; SSE41-NEXT: pslld $24, %xmm0
; SSE41-NEXT: psrad $24, %xmm0
; SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT: pmovsxdq %xmm0, %xmm1
; SSE41-NEXT: movdqa %xmm2, %xmm0
; SSE41-NEXT: retq
;
; AVX1-LABEL: sext_4i8_to_4i64:
; AVX1: # BB#0:
; AVX1-NEXT: vpslld $24, %xmm0, %xmm0
; AVX1-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm1
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; AVX1-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: sext_4i8_to_4i64:
; AVX2: # BB#0:
; AVX2-NEXT: vpslld $24, %xmm0, %xmm0
; AVX2-NEXT: vpsrad $24, %xmm0, %xmm0
; AVX2-NEXT: vpmovsxdq %xmm0, %ymm0
; AVX2-NEXT: retq
;
; X32-SSE41-LABEL: sext_4i8_to_4i64:
; X32-SSE41: # BB#0:
; X32-SSE41-NEXT: pslld $24, %xmm0
; X32-SSE41-NEXT: psrad $24, %xmm0
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm2
; X32-SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; X32-SSE41-NEXT: pmovsxdq %xmm0, %xmm1
; X32-SSE41-NEXT: movdqa %xmm2, %xmm0
; X32-SSE41-NEXT: retl
  %extmask = sext <4 x i8> %mask to <4 x i64>
  ret <4 x i64> %extmask
}