; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mattr=+cmov,cx16 -mtriple=i386-pc-linux -verify-machineinstrs < %s | FileCheck %s -check-prefix=LINUX
; RUN: llc -mattr=cx16 -mtriple=i386-macosx -relocation-model=pic -verify-machineinstrs < %s | FileCheck %s -check-prefix=PIC

@sc64 = external global i64
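
; Each 64-bit atomicrmw below is expanded into a lock cmpxchg8b loop; the
; 64-bit compare is done as a cmpl+sbbl pair and the value to store is
; selected with cmov. Signed max against 5 uses the signed cmovll condition.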
define i64 @atomic_max_i64() nounwind {
; LINUX-LABEL: atomic_max_i64:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: pushl %esi
; LINUX-NEXT: movl sc64+4, %edx
; LINUX-NEXT: movl sc64, %eax
; LINUX-NEXT: movl $4, %esi
; LINUX-NEXT: .p2align 4, 0x90
; LINUX-NEXT: .LBB0_1: # %atomicrmw.start
; LINUX-NEXT: # =>This Inner Loop Header: Depth=1
; LINUX-NEXT: cmpl %eax, %esi
; LINUX-NEXT: movl $0, %ecx
; LINUX-NEXT: sbbl %edx, %ecx
; LINUX-NEXT: movl $0, %ecx
; LINUX-NEXT: cmovll %edx, %ecx
; LINUX-NEXT: movl $5, %ebx
; LINUX-NEXT: cmovll %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB0_1
; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %esi
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_max_i64:
; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %edi
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L0$pb
; PIC-NEXT: L0$pb:
; PIC-NEXT: popl %eax
; PIC-NEXT: movl L_sc64$non_lazy_ptr-L0$pb(%eax), %esi
; PIC-NEXT: movl (%esi), %eax
; PIC-NEXT: movl 4(%esi), %edx
; PIC-NEXT: movl $4, %edi
; PIC-NEXT: .p2align 4, 0x90
; PIC-NEXT: LBB0_1: ## %atomicrmw.start
; PIC-NEXT: ## =>This Inner Loop Header: Depth=1
; PIC-NEXT: cmpl %eax, %edi
; PIC-NEXT: movl $0, %ecx
; PIC-NEXT: sbbl %edx, %ecx
; PIC-NEXT: movl $0, %ecx
; PIC-NEXT: cmovll %edx, %ecx
; PIC-NEXT: movl $5, %ebx
; PIC-NEXT: cmovll %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB0_1
; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %edi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
; PIC-NEXT: ## -- End function
entry:
  %max = atomicrmw max i64* @sc64, i64 5 acquire
  ret i64 %max
}
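
; Signed min against 6: the compare folds the constant into cmpl $7/sbbl $0,
; and the selects again use cmovll.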
define i64 @atomic_min_i64() nounwind {
; LINUX-LABEL: atomic_min_i64:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: movl sc64+4, %edx
; LINUX-NEXT: movl sc64, %eax
; LINUX-NEXT: .p2align 4, 0x90
; LINUX-NEXT: .LBB1_1: # %atomicrmw.start
; LINUX-NEXT: # =>This Inner Loop Header: Depth=1
; LINUX-NEXT: cmpl $7, %eax
; LINUX-NEXT: movl %edx, %ecx
; LINUX-NEXT: sbbl $0, %ecx
; LINUX-NEXT: movl $0, %ecx
; LINUX-NEXT: cmovll %edx, %ecx
; LINUX-NEXT: movl $6, %ebx
; LINUX-NEXT: cmovll %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB1_1
; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_min_i64:
; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L1$pb
; PIC-NEXT: L1$pb:
; PIC-NEXT: popl %eax
; PIC-NEXT: movl L_sc64$non_lazy_ptr-L1$pb(%eax), %esi
; PIC-NEXT: movl (%esi), %eax
; PIC-NEXT: movl 4(%esi), %edx
; PIC-NEXT: .p2align 4, 0x90
; PIC-NEXT: LBB1_1: ## %atomicrmw.start
; PIC-NEXT: ## =>This Inner Loop Header: Depth=1
; PIC-NEXT: cmpl $7, %eax
; PIC-NEXT: movl %edx, %ecx
; PIC-NEXT: sbbl $0, %ecx
; PIC-NEXT: movl $0, %ecx
; PIC-NEXT: cmovll %edx, %ecx
; PIC-NEXT: movl $6, %ebx
; PIC-NEXT: cmovll %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB1_1
; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
; PIC-NEXT: ## -- End function
entry:
  %min = atomicrmw min i64* @sc64, i64 6 acquire
  ret i64 %min
}
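
; Unsigned max against 7: same loop structure, but the selects use the
; unsigned cmovbl condition.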
define i64 @atomic_umax_i64() nounwind {
; LINUX-LABEL: atomic_umax_i64:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: pushl %esi
; LINUX-NEXT: movl sc64+4, %edx
; LINUX-NEXT: movl sc64, %eax
; LINUX-NEXT: movl $7, %esi
; LINUX-NEXT: .p2align 4, 0x90
; LINUX-NEXT: .LBB2_1: # %atomicrmw.start
; LINUX-NEXT: # =>This Inner Loop Header: Depth=1
; LINUX-NEXT: cmpl %eax, %esi
; LINUX-NEXT: movl $0, %ecx
; LINUX-NEXT: sbbl %edx, %ecx
; LINUX-NEXT: movl $0, %ecx
; LINUX-NEXT: cmovbl %edx, %ecx
; LINUX-NEXT: movl $7, %ebx
; LINUX-NEXT: cmovbl %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB2_1
; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %esi
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_umax_i64:
; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %edi
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L2$pb
; PIC-NEXT: L2$pb:
; PIC-NEXT: popl %eax
; PIC-NEXT: movl L_sc64$non_lazy_ptr-L2$pb(%eax), %esi
; PIC-NEXT: movl (%esi), %eax
; PIC-NEXT: movl 4(%esi), %edx
; PIC-NEXT: movl $7, %edi
; PIC-NEXT: .p2align 4, 0x90
; PIC-NEXT: LBB2_1: ## %atomicrmw.start
; PIC-NEXT: ## =>This Inner Loop Header: Depth=1
; PIC-NEXT: cmpl %eax, %edi
; PIC-NEXT: movl $0, %ecx
; PIC-NEXT: sbbl %edx, %ecx
; PIC-NEXT: movl $0, %ecx
; PIC-NEXT: cmovbl %edx, %ecx
; PIC-NEXT: movl $7, %ebx
; PIC-NEXT: cmovbl %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB2_1
; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %edi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
; PIC-NEXT: ## -- End function
entry:
  %umax = atomicrmw umax i64* @sc64, i64 7 acquire
  ret i64 %umax
}
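
; Unsigned min against 8: the compare is cmpl $9/sbbl $0 and the selects use
; cmovbl.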
define i64 @atomic_umin_i64() nounwind {
; LINUX-LABEL: atomic_umin_i64:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: movl sc64+4, %edx
; LINUX-NEXT: movl sc64, %eax
; LINUX-NEXT: .p2align 4, 0x90
; LINUX-NEXT: .LBB3_1: # %atomicrmw.start
; LINUX-NEXT: # =>This Inner Loop Header: Depth=1
; LINUX-NEXT: cmpl $9, %eax
; LINUX-NEXT: movl %edx, %ecx
; LINUX-NEXT: sbbl $0, %ecx
; LINUX-NEXT: movl $0, %ecx
; LINUX-NEXT: cmovbl %edx, %ecx
; LINUX-NEXT: movl $8, %ebx
; LINUX-NEXT: cmovbl %eax, %ebx
; LINUX-NEXT: lock cmpxchg8b sc64
; LINUX-NEXT: jne .LBB3_1
; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: atomic_umin_i64:
; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L3$pb
; PIC-NEXT: L3$pb:
; PIC-NEXT: popl %eax
; PIC-NEXT: movl L_sc64$non_lazy_ptr-L3$pb(%eax), %esi
; PIC-NEXT: movl (%esi), %eax
; PIC-NEXT: movl 4(%esi), %edx
; PIC-NEXT: .p2align 4, 0x90
; PIC-NEXT: LBB3_1: ## %atomicrmw.start
; PIC-NEXT: ## =>This Inner Loop Header: Depth=1
; PIC-NEXT: cmpl $9, %eax
; PIC-NEXT: movl %edx, %ecx
; PIC-NEXT: sbbl $0, %ecx
; PIC-NEXT: movl $0, %ecx
; PIC-NEXT: cmovbl %edx, %ecx
; PIC-NEXT: movl $8, %ebx
; PIC-NEXT: cmovbl %eax, %ebx
; PIC-NEXT: lock cmpxchg8b (%esi)
; PIC-NEXT: jne LBB3_1
; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
; PIC-NEXT: ## -- End function
entry:
  %umin = atomicrmw umin i64* @sc64, i64 8 acquire
  ret i64 %umin
}

@id = internal global i64 0, align 8
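
; The result of the atomicrmw add is used again after the loop: the emitted
; code recomputes the incremented value with addl/adcl and stores it through
; %ptr.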
define void @tf_bug(i8* %ptr) nounwind {
; LINUX-LABEL: tf_bug:
; LINUX: # %bb.0: # %entry
; LINUX-NEXT: pushl %ebx
; LINUX-NEXT: pushl %esi
; LINUX-NEXT: movl {{[0-9]+}}(%esp), %esi
; LINUX-NEXT: movl id+4, %edx
; LINUX-NEXT: movl id, %eax
; LINUX-NEXT: .p2align 4, 0x90
; LINUX-NEXT: .LBB4_1: # %atomicrmw.start
; LINUX-NEXT: # =>This Inner Loop Header: Depth=1
; LINUX-NEXT: movl %eax, %ebx
; LINUX-NEXT: addl $1, %ebx
; LINUX-NEXT: movl %edx, %ecx
; LINUX-NEXT: adcl $0, %ecx
; LINUX-NEXT: lock cmpxchg8b id
; LINUX-NEXT: jne .LBB4_1
; LINUX-NEXT: # %bb.2: # %atomicrmw.end
; LINUX-NEXT: addl $1, %eax
; LINUX-NEXT: adcl $0, %edx
; LINUX-NEXT: movl %eax, (%esi)
; LINUX-NEXT: movl %edx, 4(%esi)
; LINUX-NEXT: popl %esi
; LINUX-NEXT: popl %ebx
; LINUX-NEXT: retl
;
; PIC-LABEL: tf_bug:
; PIC: ## %bb.0: ## %entry
; PIC-NEXT: pushl %ebx
; PIC-NEXT: pushl %edi
; PIC-NEXT: pushl %esi
; PIC-NEXT: calll L4$pb
; PIC-NEXT: L4$pb:
; PIC-NEXT: popl %edi
; PIC-NEXT: movl {{[0-9]+}}(%esp), %esi
; PIC-NEXT: movl (_id-L4$pb)+4(%edi), %edx
; PIC-NEXT: movl _id-L4$pb(%edi), %eax
; PIC-NEXT: .p2align 4, 0x90
; PIC-NEXT: LBB4_1: ## %atomicrmw.start
; PIC-NEXT: ## =>This Inner Loop Header: Depth=1
; PIC-NEXT: movl %eax, %ebx
; PIC-NEXT: addl $1, %ebx
; PIC-NEXT: movl %edx, %ecx
; PIC-NEXT: adcl $0, %ecx
; PIC-NEXT: lock cmpxchg8b _id-L4$pb(%edi)
; PIC-NEXT: jne LBB4_1
; PIC-NEXT: ## %bb.2: ## %atomicrmw.end
; PIC-NEXT: addl $1, %eax
; PIC-NEXT: adcl $0, %edx
; PIC-NEXT: movl %eax, (%esi)
; PIC-NEXT: movl %edx, 4(%esi)
; PIC-NEXT: popl %esi
; PIC-NEXT: popl %edi
; PIC-NEXT: popl %ebx
; PIC-NEXT: retl
; PIC-NEXT: ## -- End function
; PIC-NEXT: .zerofill __DATA,__bss,_id,8,3 ## @id
entry:
  %tmp1 = atomicrmw add i64* @id, i64 1 seq_cst
  %tmp2 = add i64 %tmp1, 1
  %tmp3 = bitcast i8* %ptr to i64*
  store i64 %tmp2, i64* %tmp3, align 4
  ret void
}