; RUN: llc < %s -mcpu=corei7 | FileCheck %s
target triple = "x86_64-unknown-linux-gnu"
; This test covers llvm-svn: 153848, which contained a few changes that had
; to go in together:
;   1. Simplify xor/and/or (bitcast(A), bitcast(B)) -> bitcast(op (A, B))
;      (and also scalar_to_vector).
;   2. Xor/and/or are indifferent to the swizzle operation (shuffle of one
;      src), so simplify xor/and/or (shuff(A), shuff(B)) -> shuff(op (A, B)).
;   3. Optimize swizzles of shuffles: shuff(shuff(x, y), undef) -> shuff(x, y).
;   4. Fix an X86ISelLowering optimization which was very bitcast-sensitive.
;
; Code which was previously compiled to this:
;   movd   (%rsi), %xmm0
;   movdqa .LCPI0_0(%rip), %xmm2
;   pshufb %xmm2, %xmm0
;   movd   (%rdi), %xmm1
;   pshufb %xmm2, %xmm1
;   pxor   %xmm0, %xmm1
;   pshufb .LCPI0_1(%rip), %xmm1
;   movd   %xmm1, (%rdi)
;   ret
; now compiles to this:
;   movl (%rsi), %eax
;   xorl %eax, (%rdi)
;   ret
; A sketch of the IR pattern behind change 1 appears after the test below.

;CHECK-LABEL: ltstore:
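; The shufflevector below extracts the low 64 bits of %in; it should lower
; to plain movq load/store instructions, with the shuffle folded away.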
;CHECK: movq
;CHECK: movq
;CHECK: ret
define void @ltstore(<4 x i32>* %pA, <2 x i32>* %pB) {
entry:
  %in = load <4 x i32>, <4 x i32>* %pA
  %j = shufflevector <4 x i32> %in, <4 x i32> undef, <2 x i32> <i32 0, i32 1>
  store <2 x i32> %j, <2 x i32>* %pB
  ret void
}
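
; The function below is an illustrative sketch (not part of the original
; test) of the IR pattern behind change 1 above. During type legalization
; each <4 x i8> value becomes a bitcast i32 load, and the
; xor (bitcast(A), bitcast(B)) -> bitcast(xor (A, B)) fold lets the whole
; function lower to the scalar movl/xorl sequence shown in the comment at
; the top of the file. The function name and CHECK lines are assumptions
; for this sketch.
;CHECK-LABEL: pull_bitcast_sketch:
;CHECK: xorl
;CHECK: ret
define void @pull_bitcast_sketch(<4 x i8>* %pA, <4 x i8>* %pB) {
entry:
  ; Each <4 x i8> load is 32 bits wide; after the fold the xor is performed
  ; directly on i32 values instead of in a vector register.
  %A = load <4 x i8>, <4 x i8>* %pA
  %B = load <4 x i8>, <4 x i8>* %pB
  %C = xor <4 x i8> %A, %B
  store <4 x i8> %C, <4 x i8>* %pA
  ret void
}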