; RUN: llc < %s -march=x86-64 > %t
; RUN: not grep leaq %t
; RUN: not grep incq %t
; RUN: not grep decq %t
; RUN: not grep negq %t
; RUN: not grep addq %t
; RUN: not grep subq %t
; RUN: not grep "movl %" %t

; Utilize implicit zero-extension on x86-64 to eliminate explicit
; zero-extensions. Shrink 64-bit adds to 32-bit when the high
; 32-bits will be zeroed.
|
; 64-bit add whose result is masked to its low 32 bits: the mask should be
; folded into an implicitly zero-extending 32-bit add (no addq, no explicit and).
define void @bar(i64 %x, i64 %y, i64* %z) nounwind readnone {
entry:
  %t0 = add i64 %x, %y
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; 32-bit add zero-extended to 64 bits: the subsequent mask of the
; already-zero high bits is redundant and should be eliminated.
define void @easy(i32 %x, i32 %y, i64* %z) nounwind readnone {
entry:
  %t0 = add i32 %x, %y
  %tn = zext i32 %t0 to i64
  %t1 = and i64 %tn, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Loaded operand; the mask sits between the add and a following xor.
define void @cola(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
  %p = load i64, i64* %x
  %t0 = add i64 %p, %y
  %t1 = and i64 %t0, 4294967295
  %t2 = xor i64 %t1, %u
  store i64 %t2, i64* %z
  ret void
}
; Like @cola, but the xor happens before the mask.
define void @yaks(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
  %p = load i64, i64* %x
  %t0 = add i64 %p, %y
  %t1 = xor i64 %t0, %u
  %t2 = and i64 %t1, 4294967295
  store i64 %t2, i64* %z
  ret void
}
; Both add operands come from memory; the masked 64-bit add should still
; shrink to a 32-bit add.
define void @foo(i64 *%x, i64 *%y, i64* %z) nounwind readnone {
entry:
  %a = load i64, i64* %x
  %b = load i64, i64* %y
  %t0 = add i64 %a, %b
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Add with an immediate operand, masked before a xor.
define void @avo(i64 %x, i64* %z, i64 %u) nounwind readnone {
entry:
  %t0 = add i64 %x, 734847
  %t1 = and i64 %t0, 4294967295
  %t2 = xor i64 %t1, %u
  store i64 %t2, i64* %z
  ret void
}
; Like @avo, but the xor happens before the mask.
define void @phe(i64 %x, i64* %z, i64 %u) nounwind readnone {
entry:
  %t0 = add i64 %x, 734847
  %t1 = xor i64 %t0, %u
  %t2 = and i64 %t1, 4294967295
  store i64 %t2, i64* %z
  ret void
}
; Increment (add 1) then mask; should not emit a 64-bit incq.
define void @oze(i64 %y, i64* %z) nounwind readnone {
entry:
  %t0 = add i64 %y, 1
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Subtraction variant of @bar: masked 64-bit sub should shrink to 32-bit.
define void @sbar(i64 %x, i64 %y, i64* %z) nounwind readnone {
entry:
  %t0 = sub i64 %x, %y
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Subtraction variant of @easy: the mask after the zext is redundant.
define void @seasy(i32 %x, i32 %y, i64* %z) nounwind readnone {
entry:
  %t0 = sub i32 %x, %y
  %tn = zext i32 %t0 to i64
  %t1 = and i64 %tn, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Subtraction variant of @cola: loaded operand, mask between sub and xor.
define void @scola(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
  %p = load i64, i64* %x
  %t0 = sub i64 %p, %y
  %t1 = and i64 %t0, 4294967295
  %t2 = xor i64 %t1, %u
  store i64 %t2, i64* %z
  ret void
}
; Subtraction variant of @yaks: xor before the mask.
define void @syaks(i64 *%x, i64 %y, i64* %z, i64 %u) nounwind readnone {
entry:
  %p = load i64, i64* %x
  %t0 = sub i64 %p, %y
  %t1 = xor i64 %t0, %u
  %t2 = and i64 %t1, 4294967295
  store i64 %t2, i64* %z
  ret void
}
; Subtraction variant of @foo: both operands loaded from memory.
define void @sfoo(i64 *%x, i64 *%y, i64* %z) nounwind readnone {
entry:
  %a = load i64, i64* %x
  %b = load i64, i64* %y
  %t0 = sub i64 %a, %b
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Negation (sub from 0) then mask; should not emit a 64-bit negq.
define void @swya(i64 %y, i64* %z) nounwind readnone {
entry:
  %t0 = sub i64 0, %y
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}
; Decrement (sub 1) then mask; should not emit a 64-bit decq.
define void @soze(i64 %y, i64* %z) nounwind readnone {
entry:
  %t0 = sub i64 %y, 1
  %t1 = and i64 %t0, 4294967295
  store i64 %t1, i64* %z
  ret void
}