; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X64

define <2 x i64> @t1(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t1:
; X32: # %bb.0:
; X32-NEXT: movl $14, %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: psrlw %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t1:
; X64: # %bb.0:
; X64-NEXT: movl $14, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: psrlw %xmm1, %xmm0
; X64-NEXT: retq
  %tmp1 = bitcast <2 x i64> %b1 to <8 x i16>
  %tmp2 = tail call <8 x i16> @llvm.x86.sse2.psrl.w( <8 x i16> %tmp1, <8 x i16> bitcast (<4 x i32> < i32 14, i32 undef, i32 undef, i32 undef > to <8 x i16>) ) nounwind readnone
  %tmp3 = bitcast <8 x i16> %tmp2 to <2 x i64>
  ret <2 x i64> %tmp3
}

define <4 x i32> @t2(<2 x i64> %b1, <2 x i64> %c) nounwind {
; X32-LABEL: t2:
; X32: # %bb.0:
; X32-NEXT: movl $14, %eax
; X32-NEXT: movd %eax, %xmm1
; X32-NEXT: pslld %xmm1, %xmm0
; X32-NEXT: retl
;
; X64-LABEL: t2:
; X64: # %bb.0:
; X64-NEXT: movl $14, %eax
; X64-NEXT: movd %eax, %xmm1
; X64-NEXT: pslld %xmm1, %xmm0
; X64-NEXT: retq
  %tmp1 = bitcast <2 x i64> %b1 to <4 x i32>
  %tmp2 = tail call <4 x i32> @llvm.x86.sse2.psll.d( <4 x i32> %tmp1, <4 x i32> < i32 14, i32 undef, i32 undef, i32 undef > ) nounwind readnone
  ret <4 x i32> %tmp2
}

declare <8 x i16> @llvm.x86.sse2.psrl.w(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.x86.sse2.psll.d(<4 x i32>, <4 x i32>) nounwind readnone