; RUN: llc < %s -march=sparcv9 -verify-machineinstrs | FileCheck %s

; Two acquire i8 loads and a release i8 store: each atomic access is
; bracketed by membar barriers in the V9 lowering.
; CHECK-LABEL: test_atomic_i8
; CHECK: ldub [%o0]
; CHECK: membar
; CHECK: ldub [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: stb {{.+}}, [%o2]
define i8 @test_atomic_i8(i8* %ptr1, i8* %ptr2, i8* %ptr3) {
entry:
  %0 = load atomic i8, i8* %ptr1 acquire, align 1
  %1 = load atomic i8, i8* %ptr2 acquire, align 1
  %2 = add i8 %0, %1
  store atomic i8 %2, i8* %ptr3 release, align 1
  ret i8 %2
}

; Same pattern as i8, but with halfword load/store (lduh/sth).
; CHECK-LABEL: test_atomic_i16
; CHECK: lduh [%o0]
; CHECK: membar
; CHECK: lduh [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: sth {{.+}}, [%o2]
define i16 @test_atomic_i16(i16* %ptr1, i16* %ptr2, i16* %ptr3) {
entry:
  %0 = load atomic i16, i16* %ptr1 acquire, align 2
  %1 = load atomic i16, i16* %ptr2 acquire, align 2
  %2 = add i16 %0, %1
  store atomic i16 %2, i16* %ptr3 release, align 2
  ret i16 %2
}

; Word-sized atomic accesses use plain ld/st with barriers.
; CHECK-LABEL: test_atomic_i32
; CHECK: ld [%o0]
; CHECK: membar
; CHECK: ld [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: st {{.+}}, [%o2]
define i32 @test_atomic_i32(i32* %ptr1, i32* %ptr2, i32* %ptr3) {
entry:
  %0 = load atomic i32, i32* %ptr1 acquire, align 4
  %1 = load atomic i32, i32* %ptr2 acquire, align 4
  %2 = add i32 %0, %1
  store atomic i32 %2, i32* %ptr3 release, align 4
  ret i32 %2
}

; Doubleword atomic accesses use the 64-bit ldx/stx forms.
; CHECK-LABEL: test_atomic_i64
; CHECK: ldx [%o0]
; CHECK: membar
; CHECK: ldx [%o1]
; CHECK: membar
; CHECK: membar
; CHECK: stx {{.+}}, [%o2]
define i64 @test_atomic_i64(i64* %ptr1, i64* %ptr2, i64* %ptr3) {
entry:
  %0 = load atomic i64, i64* %ptr1 acquire, align 8
  %1 = load atomic i64, i64* %ptr2 acquire, align 8
  %2 = add i64 %0, %1
  store atomic i64 %2, i64* %ptr3 release, align 8
  ret i64 %2
}

;; TODO: the "move %icc" and related instructions are totally
;; redundant here. There's something weird happening in optimization
;; of the success value of cmpxchg.

; i8 cmpxchg has no native instruction: it is expanded into a masked
; word-sized cas loop (align pointer, build shift/mask, loop on cas).
; CHECK-LABEL: test_cmpxchg_i8
; CHECK: and %o1, -4, %o2
; CHECK: mov 3, %o3
; CHECK: andn %o3, %o1, %o1
; CHECK: sll %o1, 3, %o1
; CHECK: mov 255, %o3
; CHECK: sll %o3, %o1, %o5
; CHECK: xor %o5, -1, %o3
; CHECK: mov 123, %o4
; CHECK: ld [%o2], %g2
; CHECK: sll %o4, %o1, %o4
; CHECK: and %o0, 255, %o0
; CHECK: sll %o0, %o1, %o0
; CHECK: andn %g2, %o5, %g2
; CHECK: sethi 0, %o5
; CHECK: [[LABEL1:\.L.*]]:
; CHECK: or %g2, %o4, %g3
; CHECK: or %g2, %o0, %g4
; CHECK: cas [%o2], %g4, %g3
; CHECK: cmp %g3, %g4
; CHECK: mov %o5, %g4
; CHECK: move %icc, 1, %g4
; CHECK: cmp %g4, 0
; CHECK: bne [[LABEL2:\.L.*]]
; CHECK: nop
; CHECK: and %g3, %o3, %g4
; CHECK: cmp %g2, %g4
; CHECK: bne [[LABEL1]]
; CHECK: mov %g4, %g2
; CHECK: [[LABEL2]]:
; CHECK: retl
; CHECK: srl %g3, %o1, %o0
define i8 @test_cmpxchg_i8(i8 %a, i8* %ptr) {
entry:
  %pair = cmpxchg i8* %ptr, i8 %a, i8 123 monotonic monotonic
  %b = extractvalue { i8, i1 } %pair, 0
  ret i8 %b
}

; i16 cmpxchg is likewise expanded into a masked word-sized cas loop,
; with a halfword (0xffff) mask built via sethi/or.
; CHECK-LABEL: test_cmpxchg_i16
; CHECK: and %o1, -4, %o2
; CHECK: and %o1, 3, %o1
; CHECK: xor %o1, 2, %o1
; CHECK: sll %o1, 3, %o1
; CHECK: sethi 63, %o3
; CHECK: or %o3, 1023, %o4
; CHECK: sll %o4, %o1, %o5
; CHECK: xor %o5, -1, %o3
; CHECK: and %o0, %o4, %o4
; CHECK: ld [%o2], %g2
; CHECK: mov 123, %o0
; CHECK: sll %o0, %o1, %o0
; CHECK: sll %o4, %o1, %o4
; CHECK: andn %g2, %o5, %g2
; CHECK: sethi 0, %o5
; CHECK: [[LABEL1:\.L.*]]:
; CHECK: or %g2, %o0, %g3
; CHECK: or %g2, %o4, %g4
; CHECK: cas [%o2], %g4, %g3
; CHECK: cmp %g3, %g4
; CHECK: mov %o5, %g4
; CHECK: move %icc, 1, %g4
; CHECK: cmp %g4, 0
; CHECK: bne [[LABEL2:\.L.*]]
; CHECK: nop
; CHECK: and %g3, %o3, %g4
; CHECK: cmp %g2, %g4
; CHECK: bne [[LABEL1]]
; CHECK: mov %g4, %g2
; CHECK: [[LABEL2]]:
; CHECK: retl
; CHECK: srl %g3, %o1, %o0
define i16 @test_cmpxchg_i16(i16 %a, i16* %ptr) {
entry:
  %pair = cmpxchg i16* %ptr, i16 %a, i16 123 monotonic monotonic
  %b = extractvalue { i16, i1 } %pair, 0
  ret i16 %b
}

; Word-sized cmpxchg maps directly onto a single cas.
; CHECK-LABEL: test_cmpxchg_i32
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: cas [%o1], %o0, [[R]]
define i32 @test_cmpxchg_i32(i32 %a, i32* %ptr) {
entry:
  %pair = cmpxchg i32* %ptr, i32 %a, i32 123 monotonic monotonic
  %b = extractvalue { i32, i1 } %pair, 0
  ret i32 %b
}

; Doubleword cmpxchg maps directly onto a single casx.
; CHECK-LABEL: test_cmpxchg_i64
; CHECK: mov 123, [[R:%[gilo][0-7]]]
; CHECK: casx [%o1], %o0, [[R]]
define i64 @test_cmpxchg_i64(i64 %a, i64* %ptr) {
entry:
  %pair = cmpxchg i64* %ptr, i64 %a, i64 123 monotonic monotonic
  %b = extractvalue { i64, i1 } %pair, 0
  ret i64 %b
}

; Sub-word xchg has no native form; it is implemented via cas.
; CHECK-LABEL: test_swap_i8
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: cas
define i8 @test_swap_i8(i8 %a, i8* %ptr) {
entry:
  %b = atomicrmw xchg i8* %ptr, i8 42 monotonic
  ret i8 %b
}

; Sub-word xchg has no native form; it is implemented via cas.
; CHECK-LABEL: test_swap_i16
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: cas
define i16 @test_swap_i16(i16 %a, i16* %ptr) {
entry:
  %b = atomicrmw xchg i16* %ptr, i16 42 monotonic
  ret i16 %b
}

; Word-sized xchg uses the dedicated swap instruction.
; CHECK-LABEL: test_swap_i32
; CHECK: mov 42, [[R:%[gilo][0-7]]]
; CHECK: swap [%o1], [[R]]
define i32 @test_swap_i32(i32 %a, i32* %ptr) {
entry:
  %b = atomicrmw xchg i32* %ptr, i32 42 monotonic
  ret i32 %b
}

; There is no 64-bit swap; doubleword xchg is lowered through casx.
; CHECK-LABEL: test_swap_i64
; CHECK: casx [%o1],
define i64 @test_swap_i64(i64 %a, i64* %ptr) {
entry:
  %b = atomicrmw xchg i64* %ptr, i64 42 monotonic
  ret i64 %b
}

; seq_cst atomicrmw sub on i8: barrier, cas retry loop, barrier.
; CHECK-LABEL: test_load_sub_i8
; CHECK: membar
; CHECK: .L{{.*}}:
; CHECK: sub
; CHECK: cas [{{%[gilo][0-7]}}]
; CHECK: membar
define zeroext i8 @test_load_sub_i8(i8* %p, i8 zeroext %v) {
entry:
  %0 = atomicrmw sub i8* %p, i8 %v seq_cst
  ret i8 %0
}

; seq_cst atomicrmw sub on i16: barrier, cas retry loop, barrier.
; CHECK-LABEL: test_load_sub_i16
; CHECK: membar
; CHECK: .L{{.*}}:
; CHECK: sub
; CHECK: cas [{{%[gilo][0-7]}}]
; CHECK: membar
define zeroext i16 @test_load_sub_i16(i16* %p, i16 zeroext %v) {
entry:
  %0 = atomicrmw sub i16* %p, i16 %v seq_cst
  ret i16 %0
}

; seq_cst atomicrmw add on i32: copy old value, add, cas with barriers.
; CHECK-LABEL: test_load_add_i32
; CHECK: membar
; CHECK: mov [[U:%[gilo][0-7]]], [[V:%[gilo][0-7]]]
; CHECK: add [[U:%[gilo][0-7]]], %o1, [[V2:%[gilo][0-7]]]
; CHECK: cas [%o0], [[V]], [[V2]]
; CHECK: membar
define zeroext i32 @test_load_add_i32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw add i32* %p, i32 %v seq_cst
  ret i32 %0
}

; seq_cst atomicrmw sub on i64 uses casx.
; CHECK-LABEL: test_load_sub_64
; CHECK: membar
; CHECK: sub
; CHECK: casx [%o0]
; CHECK: membar
define zeroext i64 @test_load_sub_64(i64* %p, i64 zeroext %v) {
entry:
  %0 = atomicrmw sub i64* %p, i64 %v seq_cst
  ret i64 %0
}

; seq_cst atomicrmw xor on i32 via a cas loop.
; CHECK-LABEL: test_load_xor_32
; CHECK: membar
; CHECK: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_xor_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw xor i32* %p, i32 %v seq_cst
  ret i32 %0
}

; atomicrmw and must NOT emit the xor that the nand expansion uses.
; CHECK-LABEL: test_load_and_32
; CHECK: membar
; CHECK: and
; CHECK-NOT: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_and_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw and i32* %p, i32 %v seq_cst
  ret i32 %0
}

; atomicrmw nand expands to and-then-xor inside the cas loop.
; CHECK-LABEL: test_load_nand_32
; CHECK: membar
; CHECK: and
; CHECK: xor
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_nand_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw nand i32* %p, i32 %v seq_cst
  ret i32 %0
}

; atomicrmw max (signed, 64-bit): compare + conditional move on %xcc,
; then casx in the retry loop.
; CHECK-LABEL: test_load_max_64
; CHECK: membar
; CHECK: cmp
; CHECK: movg %xcc
; CHECK: casx [%o0]
; CHECK: membar
define zeroext i64 @test_load_max_64(i64* %p, i64 zeroext %v) {
entry:
  %0 = atomicrmw max i64* %p, i64 %v seq_cst
  ret i64 %0
}

; atomicrmw umin (unsigned, 32-bit): compare + movleu on %icc, then cas.
; CHECK-LABEL: test_load_umin_32
; CHECK: membar
; CHECK: cmp
; CHECK: movleu %icc
; CHECK: cas [%o0]
; CHECK: membar
define zeroext i32 @test_load_umin_32(i32* %p, i32 zeroext %v) {
entry:
  %0 = atomicrmw umin i32* %p, i32 %v seq_cst
  ret i32 %0
}