Chris Lattner
af5b25f139
Remove some redundant checks, add a couple of new ones. This allows us to
...
compile this:
int foo (unsigned long a, unsigned long long g) {
return a >= g;
}
To:
foo:
movl 8(%esp), %eax
cmpl %eax, 4(%esp)
setae %al
cmpl $0, 12(%esp)
sete %cl
andb %al, %cl
movzbl %cl, %eax
ret
instead of:
foo:
movl 8(%esp), %eax
cmpl %eax, 4(%esp)
setae %al
movzbw %al, %cx
movl 12(%esp), %edx
cmpl $0, %edx
sete %al
movzbw %al, %ax
cmpl $0, %edx
cmove %cx, %ax
movzbl %al, %eax
ret
llvm-svn: 21244
2005-04-12 02:54:39 +00:00
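A small C check of the logic behind the shorter sequence above (illustrative only, not from the commit): with a zero-extended to 64 bits, a >= g holds exactly when the high word of g is zero and a is at least the low word of g, which is what the setae/sete/andb combination computes.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: a (zero-extended) >= g  <=>  g_hi == 0 && a >= g_lo. */
static int ge_u32_u64(uint32_t a, uint32_t g_lo, uint32_t g_hi) {
    return (g_hi == 0) && (a >= g_lo);
}

int main(void) {
    assert(ge_u32_u64(5, 3, 0));    /* 5 >= 3           */
    assert(!ge_u32_u64(5, 3, 1));   /* 5 <  0x100000003 */
    assert(!ge_u32_u64(2, 3, 0));   /* 2 <  3           */
    return 0;
}
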
Chris Lattner
aedcabe8db
Emit comparisons against the sign bit better. Codegen this:
...
bool %test1(long %X) {
%A = setlt long %X, 0
ret bool %A
}
like this:
test1:
cmpl $0, 8(%esp)
setl %al
movzbl %al, %eax
ret
instead of:
test1:
movl 8(%esp), %ecx
cmpl $0, %ecx
setl %al
movzbw %al, %ax
cmpl $0, 4(%esp)
setb %dl
movzbw %dl, %dx
cmpl $0, %ecx
cmove %dx, %ax
movzbl %al, %eax
ret
llvm-svn: 21243
2005-04-12 02:19:10 +00:00
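A sketch of why the single compare is enough (illustrative, not from the commit): whether a two-word signed value is less than zero depends only on the sign bit of its high word, so the low word never needs to be examined.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: setlt long X, 0 depends solely on the high word's sign bit. */
static int is_negative_i64(uint32_t lo, uint32_t hi) {
    (void)lo;                        /* the low word does not affect the sign */
    return (hi & 0x80000000u) != 0;  /* sign bit of the high word             */
}

int main(void) {
    assert(is_negative_i64(0x00000000u, 0x80000000u));
    assert(!is_negative_i64(0xFFFFFFFFu, 0x7FFFFFFFu));
    return 0;
}
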
Chris Lattner
71ff44e46c
Emit long comparison against -1 better. Instead of this (x86):
...
test2:
movl 8(%esp), %eax
notl %eax
movl 4(%esp), %ecx
notl %ecx
orl %eax, %ecx
cmpl $0, %ecx
sete %al
movzbl %al, %eax
ret
or this (PPC):
_test2:
nor r2, r4, r4
nor r3, r3, r3
or r2, r2, r3
cntlzw r2, r2
srwi r3, r2, 5
blr
Emit this:
test2:
movl 8(%esp), %eax
andl 4(%esp), %eax
cmpl $-1, %eax
sete %al
movzbl %al, %eax
ret
or this:
_test2:
.LBB_test2_0: ;
and r2, r4, r3
cmpwi cr0, r2, -1
li r3, 1
li r2, 0
beq .LBB_test2_2 ;
.LBB_test2_1: ;
or r3, r2, r2
.LBB_test2_2: ;
blr
It seems like the PPC isel could do better for the R32 == -1 case.
llvm-svn: 21242
2005-04-12 01:46:05 +00:00
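The reasoning behind the and/cmp sequence, as a small illustrative C check (not from the commit): both words of a two-word value are all-ones exactly when their AND is all-ones, so one AND plus one compare against -1 replaces the two nots and the or.

#include <assert.h>
#include <stdint.h>

/* Illustrative only: x == -1 over two words  <=>  (lo & hi) == all-ones. */
static int eq_all_ones(uint32_t lo, uint32_t hi) {
    return (lo & hi) == 0xFFFFFFFFu;
}

int main(void) {
    assert(eq_all_ones(0xFFFFFFFFu, 0xFFFFFFFFu));
    assert(!eq_all_ones(0xFFFFFFFEu, 0xFFFFFFFFu));
    assert(!eq_all_ones(0xFFFFFFFFu, 0x00000000u));
    return 0;
}
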
Chris Lattner
87bd69884a
canonicalize x <u 1 -> x == 0. On this testcase:
...
unsigned long long g;
unsigned long foo (unsigned long a) {
return (a >= g) ? 1 : 0;
}
It changes the ppc code from:
_foo:
.LBB_foo_0: ; entry
mflr r11
stw r11, 8(r1)
bl "L00000$pb"
"L00000$pb":
mflr r2
addis r2, r2, ha16(L_g$non_lazy_ptr-"L00000$pb")
lwz r2, lo16(L_g$non_lazy_ptr-"L00000$pb")(r2)
lwz r4, 0(r2)
lwz r2, 4(r2)
cmplw cr0, r3, r2
li r2, 1
li r3, 0
bge .LBB_foo_2 ; entry
.LBB_foo_1: ; entry
or r2, r3, r3
.LBB_foo_2: ; entry
cmplwi cr0, r4, 1
li r3, 1
li r5, 0
blt .LBB_foo_4 ; entry
.LBB_foo_3: ; entry
or r3, r5, r5
.LBB_foo_4: ; entry
cmpwi cr0, r4, 0
beq .LBB_foo_6 ; entry
.LBB_foo_5: ; entry
or r2, r3, r3
.LBB_foo_6: ; entry
rlwinm r3, r2, 0, 31, 31
lwz r11, 8(r1)
mtlr r11
blr
to:
_foo:
.LBB_foo_0: ; entry
mflr r11
stw r11, 8(r1)
bl "L00000$pb"
"L00000$pb":
mflr r2
addis r2, r2, ha16(L_g$non_lazy_ptr-"L00000$pb")
lwz r2, lo16(L_g$non_lazy_ptr-"L00000$pb")(r2)
lwz r4, 0(r2)
lwz r2, 4(r2)
cmplw cr0, r3, r2
li r2, 1
li r3, 0
bge .LBB_foo_2 ; entry
.LBB_foo_1: ; entry
or r2, r3, r3
.LBB_foo_2: ; entry
cntlzw r3, r4
srwi r3, r3, 5
cmpwi cr0, r4, 0
beq .LBB_foo_4 ; entry
.LBB_foo_3: ; entry
or r2, r3, r3
.LBB_foo_4: ; entry
rlwinm r3, r2, 0, 31, 31
lwz r11, 8(r1)
mtlr r11
blr
llvm-svn: 21241
2005-04-12 00:28:49 +00:00
Chris Lattner
8ffd004920
Teach the dag mechanism that this:
...
long long test2(unsigned A, unsigned B) {
return ((unsigned long long)A << 32) + B;
}
is equivalent to this:
long long test1(unsigned A, unsigned B) {
return ((unsigned long long)A << 32) | B;
}
Now they are both codegen'd to this on ppc:
_test2:
blr
or this on x86:
test2:
movl 4(%esp), %edx
movl 8(%esp), %eax
ret
llvm-svn: 21231
2005-04-11 20:29:59 +00:00
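A quick illustrative check of the equivalence (not from the commit): because the low 32 bits of (A << 32) are zero and B occupies only those low 32 bits, adding and or'ing produce the same value.

#include <assert.h>
#include <stdint.h>

int main(void) {
    uint32_t A = 0xDEADBEEFu, B = 0x12345678u;
    uint64_t add_form = ((uint64_t)A << 32) + B;  /* test2 */
    uint64_t or_form  = ((uint64_t)A << 32) | B;  /* test1 */
    assert(add_form == or_form);
    return 0;
}
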
Chris Lattner
edd197062f
Fix expansion of shifts by exactly NVT bits on archs (like X86) that have
...
masking shifts.
This fixes the miscompilation of this:
long long test1(unsigned A, unsigned B) {
return ((unsigned long long)A << 32) | B;
}
into this:
test1:
movl 4(%esp), %edx
movl %edx, %eax
orl 8(%esp), %eax
ret
allowing us to generate this instead:
test1:
movl 4(%esp), %edx
movl 8(%esp), %eax
ret
llvm-svn: 21230
2005-04-11 20:08:52 +00:00
Nate Begeman
add0c63ad2
Fix libcall code to not pass a NULL Chain to LowerCallTo
...
Fix libcall code to not crash or assert looking for an ADJCALLSTACKUP node
when it is known that there is no ADJCALLSTACKDOWN to match.
Expand i64 multiply when ISD::MULHU is legal for the target.
llvm-svn: 21214
2005-04-11 03:01:51 +00:00
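A sketch of one standard way the i64 multiply expansion can work when a 32-bit high-multiply is available (assumed shape for illustration, not the legalizer's actual code): only the low 64 bits of the product are needed, so the a_hi*b_hi term drops out entirely.

#include <assert.h>
#include <stdint.h>

/* Stand-in for ISD::MULHU: the upper 32 bits of an unsigned 32x32 multiply. */
static uint32_t mulhu32(uint32_t a, uint32_t b) {
    return (uint32_t)(((uint64_t)a * b) >> 32);
}

/* Sketch: the low 64 bits of a 64x64 multiply from 32-bit pieces plus one MULHU. */
static void mul64(uint32_t a_lo, uint32_t a_hi, uint32_t b_lo, uint32_t b_hi,
                  uint32_t *out_lo, uint32_t *out_hi) {
    *out_lo = a_lo * b_lo;                                      /* low word  */
    *out_hi = mulhu32(a_lo, b_lo) + a_lo * b_hi + a_hi * b_lo;  /* high word */
}

int main(void) {
    uint64_t a = 0x0123456789ABCDEFULL, b = 0x0FEDCBA987654321ULL;
    uint32_t lo, hi;
    mul64((uint32_t)a, (uint32_t)(a >> 32), (uint32_t)b, (uint32_t)(b >> 32), &lo, &hi);
    assert((((uint64_t)hi << 32) | lo) == a * b);
    return 0;
}
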
Chris Lattner
e2427c9afc
Don't bother sign/zext_inreg'ing the result of an and operation if we know
...
the result does not change as a result of the extend.
This improves codegen for Alpha on this testcase:
int %a(ushort* %i) {
%tmp.1 = load ushort* %i
%tmp.2 = cast ushort %tmp.1 to int
%tmp.4 = and int %tmp.2, 1
ret int %tmp.4
}
Generating:
a:
ldgp $29, 0($27)
ldwu $0,0($16)
and $0,1,$0
ret $31,($26),1
instead of:
a:
ldgp $29, 0($27)
ldwu $0,0($16)
and $0,1,$0
addl $0,0,$0
ret $31,($26),1
btw, alpha really should switch to livein/outs for args :)
llvm-svn: 21213
2005-04-10 23:37:16 +00:00
Chris Lattner
a3b7ef05f4
Teach legalize to deal with targets that don't support some SEXTLOAD/ZEXTLOADs
...
llvm-svn: 21212
2005-04-10 22:54:25 +00:00
Chris Lattner
391a351ede
don't zextload fp values!
...
llvm-svn: 21209
2005-04-10 17:40:35 +00:00
Chris Lattner
c53cd501b5
Until we have a dag combiner, promote using zextload's instead of extloads.
...
This gives the optimizer a bit of information about the top-part of the
value.
llvm-svn: 21205
2005-04-10 04:33:47 +00:00
Chris Lattner
f74c794ccf
Fold zext_inreg(zextload), likewise for sext's
...
llvm-svn: 21204
2005-04-10 04:33:08 +00:00
Chris Lattner
f2bff92411
add a simple xform
...
llvm-svn: 21203
2005-04-10 04:04:49 +00:00
Chris Lattner
d8cbfe82ba
Fix a thinko. If the operand is promoted, pass the promoted value into
...
the new zero extend, not the original operand. This fixes cast bool -> long
on ppc.
Add an unrelated fixme
llvm-svn: 21196
2005-04-10 01:13:15 +00:00
Chris Lattner
da504741da
add a little peephole optimization. This allows us to codegen:
...
int a(short i) {
return i & 1;
}
as
_a:
andi. r3, r3, 1
blr
instead of:
_a:
rlwinm r2, r3, 0, 16, 31
andi. r3, r2, 1
blr
on ppc. It should also help the other risc targets.
llvm-svn: 21189
2005-04-09 21:43:54 +00:00
Chris Lattner
1a44855f8f
there is no need to remove this instruction; linscan does it already as it
...
removes noop moves.
llvm-svn: 21183
2005-04-09 16:24:20 +00:00
Chris Lattner
0b1681bce1
Adjust live intervals to support a livein set
...
llvm-svn: 21182
2005-04-09 16:17:50 +00:00
Chris Lattner
4c6ab01a20
Consider the livein/out set for a function, allowing targets to not have to
...
use ugly imp_def/imp_uses for arguments and return values.
llvm-svn: 21180
2005-04-09 15:23:25 +00:00
Chris Lattner
6a31b878f8
recognize some patterns as fabs operations, so that fabs at the source level
...
is deconstructed then reconstructed here. This catches 19 fabs's in 177.mesa,
9 in 168.wupwise, 5 in 171.swim, 3 in 172.mgrid, and 14 in 173.applu out of
specfp2000.
This allows the X86 code generator to make MUCH better code than before for
each of these and saves one instr on ppc.
This depends on the previous CFE patch to expose these correctly.
llvm-svn: 21171
2005-04-09 05:15:53 +00:00
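For illustration (an assumed shape, not taken from the commit or the CFE patch it mentions), the kind of hand-written source pattern involved is an absolute value spelled out as a compare and select, which the DAG can now match back into a single FABS node.

/* Illustrative only: fabs() written out by hand as compare-and-select
 * (exact behaviour around -0.0 aside). */
double my_fabs(double x) {
    return x < 0.0 ? -x : x;
}
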
Chris Lattner
8a98c7f337
Emit BRCONDTWOWAY when possible.
...
llvm-svn: 21167
2005-04-09 03:30:29 +00:00
Chris Lattner
fd98678a8a
Legalize BRCONDTWOWAY into a BRCOND/BR pair if a target doesn't support it.
...
llvm-svn: 21166
2005-04-09 03:30:19 +00:00
Chris Lattner
b0713c74a2
print and fold BRCONDTWOWAY correctly
...
llvm-svn: 21165
2005-04-09 03:27:28 +00:00
Chris Lattner
0ea81f9db4
canonicalize a bunch of operations involving fneg
...
llvm-svn: 21160
2005-04-09 03:02:46 +00:00
Chris Lattner
b32d9318d2
If a target zero or sign extends the result of its setcc, allow folding of
...
this into sign/zero extension instructions later.
On PPC, for example, this testcase:
%G = external global sbyte
implementation
void %test(int %X, int %Y) {
%C = setlt int %X, %Y
%D = cast bool %C to sbyte
store sbyte %D, sbyte* %G
ret void
}
Now codegens to:
cmpw cr0, r3, r4
li r3, 1
li r4, 0
blt .LBB_test_2 ;
.LBB_test_1: ;
or r3, r4, r4
.LBB_test_2: ;
addis r2, r2, ha16(L_G$non_lazy_ptr-"L00000$pb")
lwz r2, lo16(L_G$non_lazy_ptr-"L00000$pb")(r2)
stb r3, 0(r2)
instead of:
cmpw cr0, r3, r4
li r3, 1
li r4, 0
blt .LBB_test_2 ;
.LBB_test_1: ;
or r3, r4, r4
.LBB_test_2: ;
*** rlwinm r3, r3, 0, 31, 31
addis r2, r2, ha16(L_G$non_lazy_ptr-"L00000$pb")
lwz r2, lo16(L_G$non_lazy_ptr-"L00000$pb")(r2)
stb r3, 0(r2)
llvm-svn: 21148
2005-04-07 19:43:53 +00:00
Chris Lattner
dfed7355c9
Remove something I had for testing
...
llvm-svn: 21144
2005-04-07 18:58:54 +00:00
Chris Lattner
6b03a0cba1
This patch does two things. First, it canonicalizes 'X >= C' -> 'X > C-1'
...
(likewise for <=, >=u, <=u).
Second, it implements a special case hack to turn 'X gtu SINTMAX' -> 'X lt 0'
On powerpc, for example, this changes this:
lis r2, 32767
ori r2, r2, 65535
cmplw cr0, r3, r2
bgt .LBB_test_2
into:
cmpwi cr0, r3, 0
blt .LBB_test_2
llvm-svn: 21142
2005-04-07 18:14:58 +00:00
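A small illustrative check of the special case (not from the commit, and assuming 32-bit int): an unsigned value exceeds the largest signed value exactly when its sign bit is set, so the test collapses to a signed compare against zero, as in the cmpwi/blt sequence above.

#include <assert.h>
#include <limits.h>

int main(void) {
    /* Illustrative only: X >u INT_MAX  <=>  the sign bit of X is set. */
    unsigned x = 0x80000000u;
    assert((x > (unsigned)INT_MAX) == ((x & 0x80000000u) != 0));
    x = 0x7FFFFFFFu;
    assert((x > (unsigned)INT_MAX) == ((x & 0x80000000u) != 0));
    return 0;
}
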
Chris Lattner
7d13eae254
Fix a really scary bug that Nate found where we weren't deleting the right
...
elements out of the autoCSE maps.
llvm-svn: 21128
2005-04-07 00:30:13 +00:00
Nate Begeman
b067492535
Teach ExpandShift how to handle shifts by a constant. This allows targets
...
like PowerPC to codegen long shifts in many fewer instructions.
llvm-svn: 21122
2005-04-06 21:13:14 +00:00
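A sketch of the kind of decomposition this enables (an assumed shape for illustration, not ExpandShift's actual code): a 64-bit left shift by a known constant falls into one of a few fixed cases over the two 32-bit halves, so no branching or conditional moves are needed at run time.

#include <assert.h>
#include <stdint.h>

/* Sketch: shift a 64-bit value, given as two 32-bit words, left by a
 * constant amount in [0, 63]. */
static void shl64_by_const(uint32_t lo, uint32_t hi, unsigned amt,
                           uint32_t *out_lo, uint32_t *out_hi) {
    if (amt == 0) {
        *out_lo = lo;
        *out_hi = hi;
    } else if (amt < 32) {
        *out_lo = lo << amt;
        *out_hi = (hi << amt) | (lo >> (32 - amt));
    } else {            /* 32 <= amt < 64: the low word becomes zero */
        *out_lo = 0;
        *out_hi = lo << (amt - 32);
    }
}

int main(void) {
    uint32_t lo, hi;
    shl64_by_const(0x00000001u, 0x00000000u, 33, &lo, &hi);
    assert(lo == 0 && hi == 2);     /* 1 << 33 == 0x0000000200000000 */
    return 0;
}
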
Nate Begeman
20b7d2a36f
Expand SREM and UREM for targets that claim not to have them, like PowerPC
...
llvm-svn: 21103
2005-04-06 00:23:54 +00:00
Nate Begeman
55e8625c69
Add MULHU and MULHS nodes for the high part of an (un)signed 32x32=64b
...
multiply.
llvm-svn: 21102
2005-04-05 22:36:56 +00:00
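Reference semantics for the new nodes, as an illustrative C sketch (assumed, not the DAG code; the signed version also assumes arithmetic right shift, as on typical targets).

#include <stdint.h>

/* MULHU: upper 32 bits of the unsigned 32x32 -> 64 product. */
static uint32_t mulhu(uint32_t a, uint32_t b) {
    return (uint32_t)(((uint64_t)a * b) >> 32);
}

/* MULHS: upper 32 bits of the signed 32x32 -> 64 product. */
static int32_t mulhs(int32_t a, int32_t b) {
    return (int32_t)(((int64_t)a * b) >> 32);
}

int main(void) {
    return (mulhu(0x80000000u, 2) == 1 && mulhs(-2, 0x40000000) == -1) ? 0 : 1;
}
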
Chris Lattner
6a6056e93d
Make sure to notice that explicit physregs are used in the function
...
llvm-svn: 21084
2005-04-04 21:35:34 +00:00
Nate Begeman
cc00a7c42d
Handle expanding arguments to ISD::TRUNCATE. This happens on PowerPC when
...
you have something like i16 = truncate i64. This fixes Regression/C/casts
llvm-svn: 21073
2005-04-04 00:57:08 +00:00
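A sketch of what expanding the operand of a truncate amounts to (assumed for illustration, not the legalizer's code): once the i64 has been split into two 32-bit words, the truncate only consumes the low word and the high word is dropped.

#include <stdint.h>

/* Sketch: i16 = truncate i64, with the i64 already expanded to (lo, hi). */
static uint16_t trunc_i64_to_i16(uint32_t lo, uint32_t hi) {
    (void)hi;               /* the high word cannot affect the low 16 bits */
    return (uint16_t)lo;
}

int main(void) {
    return trunc_i64_to_i16(0x0001FFFFu, 0xDEADBEEFu) == 0xFFFF ? 0 : 1;
}
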
Chris Lattner
4784489de2
Fix sign_extend and zero_extend of promoted value types to expanded value
...
types. This occurs when casting short to long on PPC for example.
llvm-svn: 21072
2005-04-03 23:41:52 +00:00
Duraid Madina
73a316d712
add support for prefix/suffix strings to go around GlobalValue(s)
...
(which may or may not be function pointers) in the asmprinter. For the moment,
this changes nothing, except the IA64 backend, which can use this to write:
data8.ua @fptr(blah__blah__mangled_function_name)
(by setting FunctionAddrPrefix/Suffix to "@fptr(" / ")")
llvm-svn: 21024
2005-04-02 12:21:51 +00:00
Chris Lattner
0c14000760
transform fabs/fabsf calls into FABS nodes.
...
llvm-svn: 21014
2005-04-02 05:26:53 +00:00
Chris Lattner
a0c72cf289
Expand fabs into fneg
...
llvm-svn: 21013
2005-04-02 05:26:37 +00:00
Chris Lattner
f68fd0b533
Turn -0.0 - X -> fneg
...
llvm-svn: 21011
2005-04-02 05:04:50 +00:00
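A note on why the pattern uses -0.0 rather than 0.0 (illustrative, not from the commit): in IEEE arithmetic, -0.0 - X produces the negation of X even when X is +0.0, whereas 0.0 - 0.0 yields +0.0; that is what makes this exact pattern safe to fold to an fneg.

#include <stdio.h>

int main(void) {
    double x = 0.0;
    printf("%g %g\n", -0.0 - x, 0.0 - x);   /* prints: -0 0 */
    return 0;
}
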
Chris Lattner
13fe99c807
Several changes mixed up here. First, when legalizing a DAG with pcmarker,
...
don't regen the whole dag if unnecessary. Second, fix an ugly bug with
the _PARTS nodes that caused legalize to produce multiple copies of them.
Finally, implement initial support for FABS and FNEG. Currently FNEG is
the only one to be trusted though.
llvm-svn: 21009
2005-04-02 05:00:07 +00:00
Chris Lattner
c4a2046a88
print fneg/fabs
...
llvm-svn: 21008
2005-04-02 04:58:41 +00:00
Chris Lattner
4157c417a1
fix some bugs in the implementation of SHL_PARTS and friends.
...
llvm-svn: 21004
2005-04-02 04:00:59 +00:00
Chris Lattner
2e5872c671
Turn expanded shift operations into (e.g.) SHL_PARTS if the target supports it.
...
llvm-svn: 21002
2005-04-02 03:38:53 +00:00
Chris Lattner
5b7bb56ef8
Print some new nodes
...
llvm-svn: 21001
2005-04-02 03:30:42 +00:00
Chris Lattner
07f97d5f55
Fix a bug when inserting a libcall into a function with no other calls.
...
llvm-svn: 20999
2005-04-02 03:22:40 +00:00
Nate Begeman
69d39433c4
Fix a warning about an unhandled switch case
...
llvm-svn: 20994
2005-04-02 00:41:14 +00:00
Nate Begeman
cda9aa7fa9
Add ISD::UNDEF node
...
Teach the SelectionDAG code how to expand and promote it
Have PPC32 LowerCallTo generate ISD::UNDEF for int arg regs used up by fp
arguments, but not shadowing their value. This allows us to do the right
thing with both fixed and vararg floating point arguments.
llvm-svn: 20988
2005-04-01 22:34:39 +00:00
Chris Lattner
329c14a8bc
print the machine CFG in the -print-machineinstrs dump
...
llvm-svn: 20976
2005-04-01 06:48:38 +00:00
Andrew Lenharth
dec53920b4
PCMarker support for DAG and Alpha
...
llvm-svn: 20965
2005-03-31 21:24:06 +00:00
Chris Lattner
5ca31d9831
Instead of setting up the CFG edges at selectiondag construction time, set
...
them up after the code has been emitted. This allows targets to select one
mbb as multiple mbb's as needed.
llvm-svn: 20937
2005-03-30 01:10:47 +00:00
Chris Lattner
db45f7d763
Fix a bug that andrew noticed where we do not correctly sign/zero extend
...
returned integer values all the way to 64 bits (we only did it to 32 bits,
leaving the top bits undefined). This causes problems for targets like alpha,
whose ABIs define the top bits too.
llvm-svn: 20926
2005-03-29 19:09:56 +00:00
Chris Lattner
32e08b7c06
implement legalization of build_pair for nate
...
llvm-svn: 20901
2005-03-28 22:03:13 +00:00