Michael Ilseman | 74a6da963b | 2013-02-11 21:41:44 +00:00
Optimization: bitcast (<1 x ...> insertelement ..., X, ...) to ... ==> bitcast X to ...
llvm-svn: 174905
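
A minimal IR sketch of this fold (the types and value names are illustrative,
not taken from the commit): a bitcast of a one-element vector whose single
lane was just inserted is the same as bitcasting the inserted scalar directly.

define double @one_elt_vec(i64 %x) {
  %v = insertelement <1 x i64> undef, i64 %x, i32 0
  %d = bitcast <1 x i64> %v to double
  ret double %d
}
; after the fold, the body reduces to:
;   %d = bitcast i64 %x to double
;   ret double %d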

Michael Ilseman | 35f82ff833 | 2013-02-11 21:36:49 +00:00
Remove trailing whitespace
llvm-svn: 174903

Nadav Rotem | 365af6f17b | 2011-08-24 20:18:38 +00:00
Implement Constant::isAllOnesValue(). Fix ConstantFolding to use the new API.
llvm-svn: 138469
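
For context, a hand-written sketch (not from the commit; the global names are
hypothetical) of the kinds of IR constants an all-ones check has to recognize:

@s = global i32 -1                                      ; scalar: every bit set
@v = global <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>  ; vector: all-ones in every lane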

Eric Christopher | 7bc78f692c | 2011-08-23 20:11:10 +00:00
Revert "Address Duncan's CR request:"
This reverts commit 20a05be15ea5271ab6185b83200fa88263362400. (svn rev 138340)
Conflicts:
    test/Transforms/InstCombine/bitcast.ll
llvm-svn: 138366

Nadav Rotem | 7d3effa389 | 2011-08-23 17:56:54 +00:00
Fix a typo in the test from the previous commit.
llvm-svn: 138342

Nadav Rotem | c78e6607b5 | 2011-08-23 17:48:43 +00:00
Address Duncan's CR request:
1. Clean up the tests in ConstantFolding.cpp
2. Implement isAllOnes for Constant, ConstantFP, ConstantVector
llvm-svn: 138340

Nadav Rotem | ad4a70ad3e | 2011-08-20 14:02:29 +00:00
Add constant folding support for bitcasts of splat vectors to integers.
llvm-svn: 138206
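
A minimal sketch of the fold this enables (the function name and types are
illustrative): a bitcast of an all-ones splat vector constant-folds to the
equivalent integer.

define i32 @splat_to_int() {
  %i = bitcast <4 x i8> <i8 -1, i8 -1, i8 -1, i8 -1> to i32  ; splat of -1
  ret i32 %i
}
; every byte is 0xFF, so this folds to:
;   ret i32 -1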

Chris Lattner | d0214f3efe | 2010-08-28 01:50:57 +00:00
handle the constant case of vector insertion. For something like this:

struct S { float A, B, C, D; };
struct S g;
struct S bar() {
  struct S A = g;
  ++A.B;
  A.A = 42;
  return A;
}

we now generate:

_bar:                                   ## @bar
## BB#0:                                ## %entry
    movq _g@GOTPCREL(%rip), %rax
    movss 12(%rax), %xmm0
    pshufd $16, %xmm0, %xmm0
    movss 4(%rax), %xmm2
    movss 8(%rax), %xmm1
    pshufd $16, %xmm1, %xmm1
    unpcklps %xmm0, %xmm1
    addss LCPI1_0(%rip), %xmm2
    pshufd $16, %xmm2, %xmm2
    movss LCPI1_1(%rip), %xmm0
    pshufd $16, %xmm0, %xmm0
    unpcklps %xmm2, %xmm0
    ret

instead of:

_bar:                                   ## @bar
## BB#0:                                ## %entry
    movq _g@GOTPCREL(%rip), %rax
    movss 12(%rax), %xmm0
    pshufd $16, %xmm0, %xmm0
    movss 4(%rax), %xmm2
    movss 8(%rax), %xmm1
    pshufd $16, %xmm1, %xmm1
    unpcklps %xmm0, %xmm1
    addss LCPI1_0(%rip), %xmm2
    movd %xmm2, %eax
    shlq $32, %rax
    addq $1109917696, %rax              ## imm = 0x42280000
    movd %rax, %xmm0
    ret

llvm-svn: 112345
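
A hand-written IR sketch of the pattern being folded (names and types are
illustrative, and the layout assumes the little-endian x86-64 packing of two
floats into an i64): the constant low half 0x42280000 is exactly the bit
pattern of 42.0f, so it can become a constant float element.

define <2 x float> @const_piece(float %b) {
  %b32  = bitcast float %b to i32
  %b64  = zext i32 %b32 to i64
  %hi   = shl i64 %b64, 32               ; B's bits in the high half
  %pack = add i64 %hi, 1109917696        ; constant low half: 0x42280000 = 42.0f
  %vec  = bitcast i64 %pack to <2 x float>
  ret <2 x float> %vec
}
; with this patch the constant piece is inserted directly as a float:
;   %v0  = insertelement <2 x float> undef, float 4.200000e+01, i32 0
;   %vec = insertelement <2 x float> %v0, float %b, i32 1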

Chris Lattner | dd6601048e | 2010-08-28 01:20:38 +00:00
optimize bitcasts from large integers to vectors into vector element
insertions from the pieces that feed into the vector. This handles a pattern
that occurs frequently due to code generated for the x86-64 ABI. We now
compile something like this:

struct S { float A, B, C, D; };
struct S g;
struct S bar() {
  struct S A = g;
  ++A.A;
  ++A.C;
  return A;
}

into all nice vector operations:

_bar:                                   ## @bar
## BB#0:                                ## %entry
    movq _g@GOTPCREL(%rip), %rax
    movss LCPI1_0(%rip), %xmm1
    movss (%rax), %xmm0
    addss %xmm1, %xmm0
    pshufd $16, %xmm0, %xmm0
    movss 4(%rax), %xmm2
    movss 12(%rax), %xmm3
    pshufd $16, %xmm2, %xmm2
    unpcklps %xmm2, %xmm0
    addss 8(%rax), %xmm1
    pshufd $16, %xmm1, %xmm1
    pshufd $16, %xmm3, %xmm2
    unpcklps %xmm2, %xmm1
    ret

instead of icky integer operations:

_bar:                                   ## @bar
    movq _g@GOTPCREL(%rip), %rax
    movss LCPI1_0(%rip), %xmm1
    movss (%rax), %xmm0
    addss %xmm1, %xmm0
    movd %xmm0, %ecx
    movl 4(%rax), %edx
    movl 12(%rax), %esi
    shlq $32, %rdx
    addq %rcx, %rdx
    movd %rdx, %xmm0
    addss 8(%rax), %xmm1
    movd %xmm1, %eax
    shlq $32, %rsi
    addq %rax, %rsi
    movd %rsi, %xmm1
    ret

This resolves rdar://8360454

llvm-svn: 112343
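
An IR sketch of the pattern (names are illustrative; lane numbering assumes a
little-endian target, where the low 32 bits of the i64 are element 0):

define <2 x float> @pieces_to_vec(float %a, float %c) {
  %a32  = bitcast float %a to i32
  %c32  = bitcast float %c to i32
  %a64  = zext i32 %a32 to i64
  %c64  = zext i32 %c32 to i64
  %hi   = shl i64 %c64, 32       ; C's bits in the high half
  %pack = or i64 %hi, %a64       ; A's bits in the low half
  %vec  = bitcast i64 %pack to <2 x float>
  ret <2 x float> %vec
}
; becomes straight vector element insertions:
;   %v0  = insertelement <2 x float> undef, float %a, i32 0
;   %vec = insertelement <2 x float> %v0, float %c, i32 1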

Chris Lattner | bfd2228182 | 2010-08-26 22:14:59 +00:00
optimize "integer extraction out of the middle of a vector" as produced
by SRoA. This is part of rdar://7892780, but needs another xform to
expose this.
llvm-svn: 112232
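
A sketch of the kind of IR SRoA leaves behind (illustrative types; element
numbering assumes a little-endian target): shifting the wide integer right by
32 and truncating selects bits 32..63, i.e. vector element 1.

define i32 @extract_mid(<4 x float> %v) {
  %i  = bitcast <4 x float> %v to i128
  %sh = lshr i128 %i, 32          ; select bits 32..63
  %e  = trunc i128 %sh to i32
  ret i32 %e
}
; becomes an element extraction:
;   %f = extractelement <4 x float> %v, i32 1
;   %e = bitcast float %f to i32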

Chris Lattner | d4ebd6df5a | 2010-08-26 21:55:42 +00:00
optimize bitcast(trunc(bitcast(x))), where the result is a float and 'x'
is a vector, into a vector element extraction. This allows clang to compile:

struct S { float A, B, C, D; };
float foo(struct S A) { return A.A + A.B + A.C + A.D; }

into:

_foo:                                   ## @foo
## BB#0:                                ## %entry
    movd %xmm0, %rax
    shrq $32, %rax
    movd %eax, %xmm2
    addss %xmm0, %xmm2
    movapd %xmm1, %xmm3
    addss %xmm2, %xmm3
    movd %xmm1, %rax
    shrq $32, %rax
    movd %eax, %xmm0
    addss %xmm3, %xmm0
    ret

instead of:

_foo:                                   ## @foo
## BB#0:                                ## %entry
    movd %xmm0, %rax
    movd %eax, %xmm0
    shrq $32, %rax
    movd %eax, %xmm2
    addss %xmm0, %xmm2
    movd %xmm1, %rax
    movd %eax, %xmm1
    addss %xmm2, %xmm1
    shrq $32, %rax
    movd %eax, %xmm0
    addss %xmm1, %xmm0
    ret

... eliminating half of the horribleness.

llvm-svn: 112227
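
A minimal IR sketch of the fold (illustrative names; lane numbering assumes a
little-endian target): the truncation keeps the low 32 bits of the vector's
bit pattern, which are exactly element 0.

define float @first_elt(<2 x float> %x) {
  %i  = bitcast <2 x float> %x to i64
  %lo = trunc i64 %i to i32
  %f  = bitcast i32 %lo to float
  ret float %f
}
; folds to:
;   %f = extractelement <2 x float> %x, i32 0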

Chris Lattner | 3c19d3d5c3 | 2010-08-26 21:51:41 +00:00
filecheckize
llvm-svn: 112225

Chris Lattner | 7717c616bd | 2010-08-26 21:50:56 +00:00
rename test
llvm-svn: 112224