Nate Begeman
36853ee1fd
Teach the legalizer how to legalize FP_TO_UINT.
...
Teach the legalizer to promote FP_TO_UINT to FP_TO_SINT if the wider
FP_TO_UINT is also illegal. This allows us on PPC to codegen
unsigned short foo(float a) { return a; }
as:
_foo:
.LBB_foo_0: ; entry
fctiwz f0, f1
stfd f0, -8(r1)
lwz r2, -4(r1)
rlwinm r3, r2, 0, 16, 31
blr
instead of:
_foo:
.LBB_foo_0: ; entry
fctiwz f0, f1
stfd f0, -8(r1)
lwz r2, -4(r1)
lis r3, ha16(.CPI_foo_0)
lfs f0, lo16(.CPI_foo_0)(r3)
fcmpu cr0, f1, f0
blt .LBB_foo_2 ; entry
.LBB_foo_1: ; entry
fsubs f0, f1, f0
fctiwz f0, f0
stfd f0, -16(r1)
lwz r2, -12(r1)
xoris r2, r2, 32768
.LBB_foo_2: ; entry
rlwinm r3, r2, 0, 16, 31
blr
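A minimal C sketch of why the promotion is valid (illustrative only, not the legalizer code; the function name is made up): every value of the narrow unsigned type fits in the wider signed range, so an FP_TO_SINT to the wider type followed by a truncating mask gives the same result as the original FP_TO_UINT for in-range inputs.
unsigned short u16_from_float_via_sint(float a) {
    int wide = (int)a;           /* FP_TO_SINT to the wider type (fctiwz) */
    return (unsigned short)wide; /* truncate back down (the rlwinm mask)  */
}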
llvm-svn: 22785
2005-08-14 01:20:53 +00:00
Nate Begeman
83f6b98c42
Make FP_TO_UINT Illegal. This allows us to generate significantly better
...
code for FP_TO_UINT by using the legalizer's SELECT variant.
Implement a codegen improvement for SELECT_CC, selecting the false node in
the MBB that feeds the phi node. This allows us to codegen:
void foo(int *a, int b, int c) { int d = (a < b) ? 5 : 9; *a = d; }
as:
_foo:
li r2, 5
cmpw cr0, r4, r3
bgt .LBB_foo_2 ; entry
.LBB_foo_1: ; entry
li r2, 9
.LBB_foo_2: ; entry
stw r2, 0(r3)
blr
instead of:
_foo:
li r2, 5
li r5, 9
cmpw cr0, r4, r3
bgt .LBB_foo_2 ; entry
.LBB_foo_1: ; entry
or r2, r5, r5
.LBB_foo_2: ; entry
stw r2, 0(r3)
blr
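A source-level picture of the new lowering (illustrative C, not the selector code): the true value is placed in the result register up front, and only the block feeding the false edge of the phi overwrites it, so the join block needs no extra copy.
int lowered_select(int a, int b) {
    int d = 5;       /* true value loaded first (li r2, 5)           */
    if (!(a < b))
        d = 9;       /* false value written only on the fall-through */
    return d;        /* the join block simply uses d                 */
}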
llvm-svn: 22784
2005-08-14 01:17:16 +00:00
Andrew Lenharth
107a0a7690
Testing a variable before it is defined doesn't work so well. It is a fairly small thing, so just let everyone build the .a file
...
llvm-svn: 22783
2005-08-13 14:58:23 +00:00
Chris Lattner
47d3ec3525
Ooops, don't forget to clear this. The real inner loop is now:
...
.LBB_foo_3: ; no_exit.1
lfd f2, 0(r9)
lfd f3, 8(r9)
fmul f4, f1, f2
fmadd f4, f0, f3, f4
stfd f4, 8(r9)
fmul f3, f1, f3
fmsub f2, f0, f2, f3
stfd f2, 0(r9)
addi r9, r9, 16
addi r8, r8, 1
cmpw cr0, r8, r4
ble .LBB_foo_3 ; no_exit.1
llvm-svn: 22782
2005-08-13 07:42:01 +00:00
Chris Lattner
5949d49032
Recursively scan scev expressions for common subexpressions. This allows us
...
to handle nested loops much better, for example, by being able to tell that
these two expressions:
{( 8 + ( 16 * ( 1 + %Tmp11 + %Tmp12)) + %c_),+,( 16 * %Tmp12)}<loopentry.1>
{(( 16 * ( 1 + %Tmp11 + %Tmp12)) + %c_),+,( 16 * %Tmp12)}<loopentry.1>
have the following common part that can be shared:
{(( 16 * ( 1 + %Tmp11 + %Tmp12)) + %c_),+,( 16 * %Tmp12)}<loopentry.1>
This allows us to codegen an important inner loop in 168.wupwise as:
.LBB_foo_4: ; no_exit.1
lfd f2, 16(r9)
fmul f3, f0, f2
fmul f2, f1, f2
fadd f4, f3, f2
stfd f4, 8(r9)
fsub f2, f3, f2
stfd f2, 16(r9)
addi r8, r8, 1
addi r9, r9, 16
cmpw cr0, r8, r4
ble .LBB_foo_4 ; no_exit.1
instead of:
.LBB_foo_3: ; no_exit.1
lfdx f2, r6, r9
add r10, r6, r9
lfd f3, 8(r10)
fmul f4, f1, f2
fmadd f4, f0, f3, f4
stfd f4, 8(r10)
fmul f3, f1, f3
fmsub f2, f0, f2, f3
stfdx f2, r6, r9
addi r9, r9, 16
addi r8, r8, 1
cmpw cr0, r8, r4
ble .LBB_foo_3 ; no_exit.1
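A hypothetical C rendering of the shared part (names borrowed from the SCEV expressions above; the loop body is a placeholder, not wupwise's actual arithmetic): both addresses are the same recurrence, one at a constant +8, so a single pointer advanced by the common stride serves both accesses, which is what gives the one-base-register loop shown first.
void shared_recurrence(char *c_, long Tmp11, long Tmp12, long n) {
    char *p = c_ + 16 * (1 + Tmp11 + Tmp12);  /* common subexpression    */
    for (long i = 0; i < n; ++i) {
        double *x = (double *)p;              /* {common,+,16*Tmp12}     */
        double *y = (double *)(p + 8);        /* {8 + common,+,16*Tmp12} */
        double t = *x;                        /* placeholder body        */
        *x = *y;
        *y = t;
        p += 16 * Tmp12;                      /* shared stride           */
    }
}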
llvm-svn: 22781
2005-08-13 07:27:18 +00:00
Nate Begeman
dc3154ec66
Remove an unnecessary argument to SimplifySelectCC and add an additional
...
assert when creating a select_cc node.
llvm-svn: 22780
2005-08-13 06:14:17 +00:00
Nate Begeman
b6651e81a0
Fix the fabs regression on x86 by abstracting the select_cc optimization
...
out into SimplifySelectCC. This allows both ISD::SELECT and ISD::SELECT_CC
to use the same set of simplifying folds.
llvm-svn: 22779
2005-08-13 06:00:21 +00:00
Nate Begeman
a22bf778c9
Remove support for 64b PPC; it's been broken for a long time. It'll be
...
back once a DAG->DAG ISel exists.
llvm-svn: 22778
2005-08-13 05:59:16 +00:00
Andrew Lenharth
6b62b479fa
Fix oversized GOT problem with gcc-4 on alpha
...
llvm-svn: 22777
2005-08-13 05:09:50 +00:00
Chris Lattner
89c1dfc733
Teach SplitCriticalEdge to update LoopInfo if it is alive. This fixes
...
a problem in LoopStrengthReduction, where it would split critical edges and
then confuse itself with outdated loop information.
llvm-svn: 22776
2005-08-13 01:38:43 +00:00
Chris Lattner
79396539d3
remove dead code. The exit block list is computed on demand, thus does not
...
need to be updated. This code is a relic from when it did.
llvm-svn: 22775
2005-08-13 01:30:36 +00:00
Chris Lattner
21381e8424
implement a couple of simple shift foldings.
...
e.g. (X & 7) >> 3 -> 0
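The fold in plain C terms: the mask leaves at most the low three bits set, so a right shift by three always yields zero and the whole expression folds to a constant.
unsigned shift_fold_example(unsigned x) {
    return (x & 7u) >> 3;   /* always 0, for any x */
}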
llvm-svn: 22774
2005-08-12 23:54:58 +00:00
Jim Laskey
35960708b7
Fix for 2005-08-12-rlwimi-crash.ll. Make allowance for masks being shifted to
...
zero.
llvm-svn: 22773
2005-08-12 23:52:46 +00:00
Jim Laskey
461edda709
Added test cases to guarantee use of ORC and ANDC.
...
llvm-svn: 22772
2005-08-12 23:40:14 +00:00
Jim Laskey
a568700618
This change handles the cases of (~x)&y and x&(~y) yielding ANDC, and
...
(~x)|y and x|(~y) yielding ORC.
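The four source patterns in C, for reference; on PPC each now selects to a single andc or orc instead of a separate not followed by and/or.
unsigned andc_a(unsigned x, unsigned y) { return ~x & y; }   /* andc */
unsigned andc_b(unsigned x, unsigned y) { return x & ~y; }   /* andc */
unsigned orc_a (unsigned x, unsigned y) { return ~x | y; }   /* orc  */
unsigned orc_b (unsigned x, unsigned y) { return x | ~y; }   /* orc  */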
llvm-svn: 22771
2005-08-12 23:38:02 +00:00
Chris Lattner
f6a762ada1
testcase that crashed the ppc backend, distilled from crafty
...
llvm-svn: 22770
2005-08-12 23:34:03 +00:00
Chris Lattner
8447b49526
When splitting critical edges, make sure not to leave the new block in the
...
middle of the loop. This turns a critical loop in gzip into this:
.LBB_test_1: ; loopentry
or r27, r28, r28
add r28, r3, r27
lhz r28, 3(r28)
add r26, r4, r27
lhz r26, 3(r26)
cmpw cr0, r28, r26
bne .LBB_test_8 ; loopentry.loopexit_crit_edge
.LBB_test_2: ; shortcirc_next.0
add r28, r3, r27
lhz r28, 5(r28)
add r26, r4, r27
lhz r26, 5(r26)
cmpw cr0, r28, r26
bne .LBB_test_7 ; shortcirc_next.0.loopexit_crit_edge
.LBB_test_3: ; shortcirc_next.1
add r28, r3, r27
lhz r28, 7(r28)
add r26, r4, r27
lhz r26, 7(r26)
cmpw cr0, r28, r26
bne .LBB_test_6 ; shortcirc_next.1.loopexit_crit_edge
.LBB_test_4: ; shortcirc_next.2
add r28, r3, r27
lhz r26, 9(r28)
add r28, r4, r27
lhz r25, 9(r28)
addi r28, r27, 8
cmpw cr7, r26, r25
mfcr r26, 1
rlwinm r26, r26, 31, 31, 31
add r25, r8, r27
cmpw cr7, r25, r7
mfcr r25, 1
rlwinm r25, r25, 29, 31, 31
and. r26, r26, r25
bne .LBB_test_1 ; loopentry
instead of this:
.LBB_test_1: ; loopentry
or r27, r28, r28
add r28, r3, r27
lhz r28, 3(r28)
add r26, r4, r27
lhz r26, 3(r26)
cmpw cr0, r28, r26
beq .LBB_test_3 ; shortcirc_next.0
.LBB_test_2: ; loopentry.loopexit_crit_edge
add r2, r30, r27
add r8, r29, r27
b .LBB_test_9 ; loopexit
.LBB_test_3: ; shortcirc_next.0
add r28, r3, r27
lhz r28, 5(r28)
add r26, r4, r27
lhz r26, 5(r26)
cmpw cr0, r28, r26
beq .LBB_test_5 ; shortcirc_next.1
.LBB_test_4: ; shortcirc_next.0.loopexit_crit_edge
add r2, r11, r27
add r8, r12, r27
b .LBB_test_9 ; loopexit
.LBB_test_5: ; shortcirc_next.1
add r28, r3, r27
lhz r28, 7(r28)
add r26, r4, r27
lhz r26, 7(r26)
cmpw cr0, r28, r26
beq .LBB_test_7 ; shortcirc_next.2
.LBB_test_6: ; shortcirc_next.1.loopexit_crit_edge
add r2, r9, r27
add r8, r10, r27
b .LBB_test_9 ; loopexit
.LBB_test_7: ; shortcirc_next.2
add r28, r3, r27
lhz r26, 9(r28)
add r28, r4, r27
lhz r25, 9(r28)
addi r28, r27, 8
cmpw cr7, r26, r25
mfcr r26, 1
rlwinm r26, r26, 31, 31, 31
add r25, r8, r27
cmpw cr7, r25, r7
mfcr r25, 1
rlwinm r25, r25, 29, 31, 31
and. r26, r26, r25
bne .LBB_test_1 ; loopentry
Next up, improve the code for the loop.
llvm-svn: 22769
2005-08-12 22:22:17 +00:00
Chris Lattner
e09bbc800c
Add a helper method
...
llvm-svn: 22768
2005-08-12 22:14:06 +00:00
Chris Lattner
1344253e3f
add a helper method
...
llvm-svn: 22767
2005-08-12 22:13:27 +00:00
Chris Lattner
4fec86d348
Fix a FIXME: if we are inserting code for a PHI argument, split the critical
...
edge so that the code is not always executed for both operands. This
prevents LSR from inserting code into loops whose exit blocks contain
PHI uses of IV expressions (which are outside of loops). On gzip, for
example, we turn this ugly code:
.LBB_test_1: ; loopentry
add r27, r3, r28
lhz r27, 3(r27)
add r26, r4, r28
lhz r26, 3(r26)
add r25, r30, r28 ;; Only live if exiting the loop
add r24, r29, r28 ;; Only live if exiting the loop
cmpw cr0, r27, r26
bne .LBB_test_5 ; loopexit
into this:
.LBB_test_1: ; loopentry
or r27, r28, r28
add r28, r3, r27
lhz r28, 3(r28)
add r26, r4, r27
lhz r26, 3(r26)
cmpw cr0, r28, r26
beq .LBB_test_3 ; shortcirc_next.0
.LBB_test_2: ; loopentry.loopexit_crit_edge
add r2, r30, r27
add r8, r29, r27
b .LBB_test_9 ; loopexit
.LBB_test_2: ; shortcirc_next.0
...
blt .LBB_test_1
into this:
.LBB_test_1: ; loopentry
or r27, r28, r28
add r28, r3, r27
lhz r28, 3(r28)
add r26, r4, r27
lhz r26, 3(r26)
cmpw cr0, r28, r26
beq .LBB_test_3 ; shortcirc_next.0
.LBB_test_2: ; loopentry.loopexit_crit_edge
add r2, r30, r27
add r8, r29, r27
b .LBB_t_3: ; shortcirc_next.0
.LBB_test_3: ; shortcirc_next.0
...
blt .LBB_test_1
Next step: get the block out of the loop so that the loop is all
fall-throughs again.
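A hedged source-level view of the change (variable names illustrative): the IV-derived values feeding the exit block's PHIs are only needed when the loop actually exits, so they are now materialized on the split critical edge rather than recomputed on every iteration as in the "ugly" listing.
int exit_only_values(const short *a, const short *b, int n, int base1, int base2) {
    int i = 0;
    while (i < n && a[i] == b[i])
        ++i;                 /* loop body: no exit-only work here     */
    int p = base1 + i;       /* computed once, on the exit/crit edge, */
    int q = base2 + i;       /* instead of inside every iteration     */
    return p ^ q;
}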
llvm-svn: 22766
2005-08-12 22:06:11 +00:00
Chris Lattner
b7ebe65c56
Change break critical edges to not remove, then insert, PHI node entries.
...
Instead, just update the BB in-place. This is both faster, and it prevents
split-critical-edges from shuffling the PHI argument list unnecessarily.
llvm-svn: 22765
2005-08-12 21:58:07 +00:00
Andrew Lenharth
8c6701be6e
match gcc's use of tabs, makes diffs easier
...
llvm-svn: 22764
2005-08-12 16:14:08 +00:00
Andrew Lenharth
ca94102d3e
.section cleanup, patch from Nicholas Riley
...
llvm-svn: 22763
2005-08-12 16:13:43 +00:00
Chris Lattner
4ac8b3f781
First rev of Xcode 2.1 project
...
llvm-svn: 22762
2005-08-11 22:19:26 +00:00
Jim Laskey
a50f770a2c
1. Added the function isOpcWithIntImmediate to simplify testing of an operand with a
...
specified opcode and an integer constant right operand.
2. Modified ISD::SHL, ISD::SRL, ISD::SRA to use rlwinm when applied after a mask.
llvm-svn: 22761
2005-08-11 21:59:23 +00:00
Chris Lattner
d418d752f4
Tidied up the use of dyn_cast<ConstantSDNode> by using isIntImmediate more.
...
Patch by Jim Laskey.
llvm-svn: 22760
2005-08-11 17:56:50 +00:00
Chris Lattner
c5e1312baa
Use a more efficient method of creating integer and float virtual registers
...
(avoids an extra level of indirection in MakeReg).
defined MakeIntReg using RegMap->createVirtualRegister(PPC32::GPRCRegisterClass)
defined MakeFPReg using RegMap->createVirtualRegister(PPC32::FPRCRegisterClass)
s/MakeReg(MVT::i32)/MakeIntReg/
s/MakeReg(MVT::f64)/MakeFPReg/
Patch by Jim Laskey!
llvm-svn: 22759
2005-08-11 17:15:31 +00:00
Nate Begeman
5c7656fd53
Add a select_cc optimization for recognizing abs(int). This speeds up an
...
integer MPEG encoding loop by a factor of two.
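The pattern the fold recognizes, written as plain C for illustration: integer absolute value expressed as a compare-and-select, which can then be emitted as a short branch-free sequence instead of a compare and branch inside the loop.
int int_abs(int x) {
    return x < 0 ? -x : x;   /* select_cc(x < 0, -x, x) */
}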
llvm-svn: 22758
2005-08-11 02:18:13 +00:00
Nate Begeman
180b08897f
Some SELECT_CC cleanups:
...
1. Move assertions for node creation to getNode()
2. Legalize the values returned in ExpandOp immediately
3. Move select_cc optimizations from SELECT's getNode() to SELECT_CC's,
allowing them to be cleaned up significantly.
This paves the way to pick up additional optimizations on SELECT_CC, such
as sum-of-absolute-differences.
llvm-svn: 22757
2005-08-11 01:12:20 +00:00
Nate Begeman
5646b181e8
Make SELECT illegal on PPC32, switch to using SELECT_CC, which more closely
...
reflects what the hardware is capable of. This significantly simplifies
the CC handling logic throughout the ISel.
llvm-svn: 22756
2005-08-10 20:52:09 +00:00
Nate Begeman
e5b86d7442
Add new node, SELECT_CC. This node is for targets that don't natively
...
implement SELECT.
llvm-svn: 22755
2005-08-10 20:51:12 +00:00
Chris Lattner
3428b95634
Changes for PPC32ISelPattern.cpp
...
1. Clean up how SelectIntImmediateExpr handles use counts.
2. "Subtract from" was not clearing hi 16 bits.
Patch by Jim Laskey
llvm-svn: 22754
2005-08-10 18:11:33 +00:00
Chris Lattner
21c0fd9e8f
Fix an oversight that may be causing PR617.
...
llvm-svn: 22753
2005-08-10 17:37:53 +00:00
Chris Lattner
1a92cb2cd1
now that we handle non-constant strides, this testcase passes
...
llvm-svn: 22752
2005-08-10 17:17:45 +00:00
Chris Lattner
62df798919
remove some trickiness that broke yacr2 and some other programs last night
...
llvm-svn: 22751
2005-08-10 17:15:20 +00:00
Chris Lattner
aeedcc7fc2
Changed the XOR case to use the isOprNot predicate.
...
Patch by Jim Laskey!
llvm-svn: 22750
2005-08-10 16:35:46 +00:00
Chris Lattner
67d0753773
1. Refactored handling of integer immediate values for add, or, xor and sub.
...
New routine: ISel::SelectIntImmediateExpr
2. Now checking use counts of large constants. If the use count is > 2, then
drop through so that the constant gets loaded into a register.
Source:
int %test1(int %a) {
entry:
%tmp.1 = add int %a, 123456789 ; <int> [#uses=1]
%tmp.2 = or int %tmp.1, 123456789 ; <int> [#uses=1]
%tmp.3 = xor int %tmp.2, 123456789 ; <int> [#uses=1]
%tmp.4 = sub int %tmp.3, -123456789 ; <int> [#uses=1]
ret int %tmp.4
}
Did Emit:
.machine ppc970
.text
.align 2
.globl _test1
_test1:
.LBB_test1_0: ; entry
addi r2, r3, -13035
addis r2, r2, 1884
ori r2, r2, 52501
oris r2, r2, 1883
xori r2, r2, 52501
xoris r2, r2, 1883
addi r2, r2, 52501
addis r3, r2, 1883
blr
Now Emits:
.machine ppc970
.text
.align 2
.globl _test1
_test1:
.LBB_test1_0: ; entry
lis r2, 1883
ori r2, r2, 52501
add r3, r3, r2
or r3, r3, r2
xor r3, r3, r2
add r3, r3, r2
blr
Patch by Jim Laskey!
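The IR test above written as C, to make the trade-off concrete (the 32-bit constant does not fit in a 16-bit immediate): with more than two uses it is cheaper to build 123456789 once and reuse the register than to fold it into each add/or/xor as split immediates.
int test1_c(int a) {
    const int k = 123456789;  /* built once: lis r2, 1883; ori r2, r2, 52501 */
    int t = a + k;
    t = t | k;
    t = t ^ k;
    return t - (-k);          /* the "sub ..., -123456789" from the IR */
}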
llvm-svn: 22749
2005-08-10 16:34:52 +00:00
Duraid Madina
1c2f9fdf71
sorry!! this is temporary; for some reason the nasty constmul code seems to
...
go into an infinite loop when using g++-4.0.1*, which kills the ia64 nightly
tester. A proper fix shall be forthcoming!!! thanks for not killing me. :)
llvm-svn: 22748
2005-08-10 12:38:57 +00:00
Chris Lattner
5f56d71cd7
Fix a bug compiling: select (i32 < i32), f32, f32
...
llvm-svn: 22747
2005-08-10 03:40:09 +00:00
Chris Lattner
f83ce5faee
Make loop-simplify produce better loops by turning PHI nodes like X = phi [X, Y]
...
into just Y. This often occurs when it separates loops that have collapsed loop
headers. This implements LoopSimplify/phi-node-simplify.ll
llvm-svn: 22746
2005-08-10 02:07:32 +00:00
Chris Lattner
d54879c136
New testcase
...
llvm-svn: 22745
2005-08-10 02:06:35 +00:00
Chris Lattner
677d85784a
Allow indvar simplify to canonicalize ANY affine IV, not just affine IVs with
...
constant stride. This implements Transforms/IndVarsSimplify/variable-stride-ivs.ll
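An example of the kind of IV this now covers (illustrative C): x below is an affine IV {0,+,s} whose stride s is loop-invariant but not constant; indvars can now rewrite it in terms of the canonical induction variable as x == i * s.
int sum_strided(int n, int s) {
    int total = 0;
    for (int i = 0, x = 0; i < n; ++i, x += s)  /* x is {0,+,s}: x == i * s */
        total += x;
    return total;
}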
llvm-svn: 22744
2005-08-10 01:12:06 +00:00
Chris Lattner
9af011cc11
new testcase
...
llvm-svn: 22743
2005-08-10 01:11:24 +00:00
Chris Lattner
35c0e2ee33
Fix an obvious oops
...
llvm-svn: 22742
2005-08-10 00:59:40 +00:00
Chris Lattner
08c7fd19e1
new testcase we handle
...
llvm-svn: 22741
2005-08-10 00:48:11 +00:00
Chris Lattner
edff91a49a
Teach LSR to strength reduce IVs that have a loop-invariant but non-constant stride.
...
For code like this:
void foo(float *a, float *b, int n, int stride_a, int stride_b) {
int i;
for (i=0; i<n; i++)
a[i*stride_a] = b[i*stride_b];
}
we now emit:
.LBB_foo2_2: ; no_exit
lfs f0, 0(r4)
stfs f0, 0(r3)
addi r7, r7, 1
add r4, r2, r4
add r3, r6, r3
cmpw cr0, r7, r5
blt .LBB_foo2_2 ; no_exit
instead of:
.LBB_foo_2: ; no_exit
mullw r8, r2, r7 ;; multiply!
slwi r8, r8, 2
lfsx f0, r4, r8
mullw r8, r2, r6 ;; multiply!
slwi r8, r8, 2
stfsx f0, r3, r8
addi r2, r2, 1
cmpw cr0, r2, r5
blt .LBB_foo_2 ; no_exit
Loops with variable strides occur pretty often. For example, in SPECFP2K
there are 317 variable strides in 177.mesa, 3 in 179.art, 14 in 188.ammp,
56 in 168.wupwise, 36 in 172.mgrid.
Now we can allow indvars to turn functions written like this:
void foo2(float *a, float *b, int n, int stride_a, int stride_b) {
int i, ai = 0, bi = 0;
for (i=0; i<n; i++)
{
a[ai] = b[bi];
ai += stride_a;
bi += stride_b;
}
}
into code like the above for better analysis. With this patch, they generate
identical code.
llvm-svn: 22740
2005-08-10 00:45:21 +00:00
Chris Lattner
dde7dc525e
Fix Regression/Transforms/LoopStrengthReduce/phi_node_update_multiple_preds.ll
...
by being more careful about updating PHI nodes
llvm-svn: 22739
2005-08-10 00:35:32 +00:00
Chris Lattner
832105d511
new testcase
...
llvm-svn: 22738
2005-08-10 00:33:01 +00:00
Chris Lattner
c6c4d99a21
Fix some 80 column violations.
...
Once we compute the evolution for a GEP, tell SE about it. This allows
users of the GEP to know its evolution even if they are not direct users.
This allows us to compile this testcase:
void fbSolidFillmmx(int w, unsigned char *d) {
while (w >= 64) {
*(unsigned long long *) (d + 0) = 0;
*(unsigned long long *) (d + 8) = 0;
*(unsigned long long *) (d + 16) = 0;
*(unsigned long long *) (d + 24) = 0;
*(unsigned long long *) (d + 32) = 0;
*(unsigned long long *) (d + 40) = 0;
*(unsigned long long *) (d + 48) = 0;
*(unsigned long long *) (d + 56) = 0;
w -= 64;
d += 64;
}
}
into:
.LBB_fbSolidFillmmx_2: ; no_exit
li r2, 0
stw r2, 0(r4)
stw r2, 4(r4)
stw r2, 8(r4)
stw r2, 12(r4)
stw r2, 16(r4)
stw r2, 20(r4)
stw r2, 24(r4)
stw r2, 28(r4)
stw r2, 32(r4)
stw r2, 36(r4)
stw r2, 40(r4)
stw r2, 44(r4)
stw r2, 48(r4)
stw r2, 52(r4)
stw r2, 56(r4)
stw r2, 60(r4)
addi r4, r4, 64
addi r3, r3, -64
cmpwi cr0, r3, 63
bgt .LBB_fbSolidFillmmx_2 ; no_exit
instead of:
.LBB_fbSolidFillmmx_2: ; no_exit
li r11, 0
stw r11, 0(r4)
stw r11, 4(r4)
stwx r11, r10, r4
add r12, r10, r4
stw r11, 4(r12)
stwx r11, r9, r4
add r12, r9, r4
stw r11, 4(r12)
stwx r11, r8, r4
add r12, r8, r4
stw r11, 4(r12)
stwx r11, r7, r4
add r12, r7, r4
stw r11, 4(r12)
stwx r11, r6, r4
add r12, r6, r4
stw r11, 4(r12)
stwx r11, r5, r4
add r12, r5, r4
stw r11, 4(r12)
stwx r11, r2, r4
add r12, r2, r4
stw r11, 4(r12)
addi r4, r4, 64
addi r3, r3, -64
cmpwi cr0, r3, 63
bgt .LBB_fbSolidFillmmx_2 ; no_exit
llvm-svn: 22737
2005-08-09 23:39:36 +00:00
Chris Lattner
b310ac4a86
implement two helper methods
...
llvm-svn: 22736
2005-08-09 23:36:33 +00:00