; RUN: llc < %s -mtriple=armv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv7-apple-ios -verify-machineinstrs | FileCheck %s
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs | FileCheck %s --check-prefix=CHECK-T1
; RUN: llc < %s -mtriple=thumbv6-apple-ios -verify-machineinstrs -mcpu=cortex-m0 | FileCheck %s --check-prefix=CHECK-M0

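; This test checks that atomicrmw and cmpxchg operations on i32, i16 and i8
; are expanded inline to ldrex/strex loops on ARMv7 and Thumb2, and are
; lowered to the corresponding __sync_* library calls on Thumb1 and Cortex-M0.
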
define void @func(i32 %argc, i8** %argv) nounwind {
entry:
  %argc.addr = alloca i32 ; <i32*> [#uses=1]
  %argv.addr = alloca i8** ; <i8***> [#uses=1]
  %val1 = alloca i32 ; <i32*> [#uses=2]
  %val2 = alloca i32 ; <i32*> [#uses=15]
  %andt = alloca i32 ; <i32*> [#uses=2]
  %ort = alloca i32 ; <i32*> [#uses=2]
  %xort = alloca i32 ; <i32*> [#uses=2]
  %old = alloca i32 ; <i32*> [#uses=18]
  %temp = alloca i32 ; <i32*> [#uses=2]
  store i32 %argc, i32* %argc.addr
  store i8** %argv, i8*** %argv.addr
  store i32 0, i32* %val1
  store i32 31, i32* %val2
  store i32 3855, i32* %andt
  store i32 3855, i32* %ort
  store i32 3855, i32* %xort
  store i32 4, i32* %temp
  %tmp = load i32* %temp
  ; CHECK: ldrex
  ; CHECK: add
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_add_4
  ; CHECK-M0: bl ___sync_fetch_and_add_4
  %0 = atomicrmw add i32* %val1, i32 %tmp monotonic
  store i32 %0, i32* %old
  ; CHECK: ldrex
  ; CHECK: sub
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_sub_4
  ; CHECK-M0: bl ___sync_fetch_and_sub_4
  %1 = atomicrmw sub i32* %val2, i32 30 monotonic
  store i32 %1, i32* %old
  ; CHECK: ldrex
  ; CHECK: add
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_add_4
  ; CHECK-M0: bl ___sync_fetch_and_add_4
  %2 = atomicrmw add i32* %val2, i32 1 monotonic
  store i32 %2, i32* %old
  ; CHECK: ldrex
  ; CHECK: sub
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_sub_4
  ; CHECK-M0: bl ___sync_fetch_and_sub_4
  %3 = atomicrmw sub i32* %val2, i32 1 monotonic
  store i32 %3, i32* %old
  ; CHECK: ldrex
  ; CHECK: and
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_and_4
  ; CHECK-M0: bl ___sync_fetch_and_and_4
  %4 = atomicrmw and i32* %andt, i32 4080 monotonic
  store i32 %4, i32* %old
  ; CHECK: ldrex
  ; CHECK: or
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_or_4
  ; CHECK-M0: bl ___sync_fetch_and_or_4
  %5 = atomicrmw or i32* %ort, i32 4080 monotonic
  store i32 %5, i32* %old
  ; CHECK: ldrex
  ; CHECK: eor
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_xor_4
  ; CHECK-M0: bl ___sync_fetch_and_xor_4
  %6 = atomicrmw xor i32* %xort, i32 4080 monotonic
  store i32 %6, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_min_4
  ; CHECK-M0: bl ___sync_fetch_and_min_4
  %7 = atomicrmw min i32* %val2, i32 16 monotonic
  store i32 %7, i32* %old
  %neg = sub i32 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_min_4
  ; CHECK-M0: bl ___sync_fetch_and_min_4
  %8 = atomicrmw min i32* %val2, i32 %neg monotonic
  store i32 %8, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_max_4
  ; CHECK-M0: bl ___sync_fetch_and_max_4
  %9 = atomicrmw max i32* %val2, i32 1 monotonic
  store i32 %9, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_max_4
  ; CHECK-M0: bl ___sync_fetch_and_max_4
  %10 = atomicrmw max i32* %val2, i32 0 monotonic
  store i32 %10, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_4
  ; CHECK-M0: bl ___sync_fetch_and_umin_4
  %11 = atomicrmw umin i32* %val2, i32 16 monotonic
  store i32 %11, i32* %old
  %uneg = sub i32 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_4
  ; CHECK-M0: bl ___sync_fetch_and_umin_4
  %12 = atomicrmw umin i32* %val2, i32 %uneg monotonic
  store i32 %12, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_4
  ; CHECK-M0: bl ___sync_fetch_and_umax_4
  %13 = atomicrmw umax i32* %val2, i32 1 monotonic
  store i32 %13, i32* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_4
  ; CHECK-M0: bl ___sync_fetch_and_umax_4
  %14 = atomicrmw umax i32* %val2, i32 0 monotonic
  store i32 %14, i32* %old
  ret void
}
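; 16-bit atomic umin/umax: expanded inline on ARMv7/Thumb2, lowered to the
; __sync_fetch_and_*_2 library calls on Thumb1 and Cortex-M0.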
define void @func2() nounwind {
entry:
  %val = alloca i16
  %old = alloca i16
  store i16 31, i16* %val
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_2
  ; CHECK-M0: bl ___sync_fetch_and_umin_2
  %0 = atomicrmw umin i16* %val, i16 16 monotonic
  store i16 %0, i16* %old
  %uneg = sub i16 0, 1
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_2
  ; CHECK-M0: bl ___sync_fetch_and_umin_2
  %1 = atomicrmw umin i16* %val, i16 %uneg monotonic
  store i16 %1, i16* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_2
  ; CHECK-M0: bl ___sync_fetch_and_umax_2
  %2 = atomicrmw umax i16* %val, i16 1 monotonic
  store i16 %2, i16* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_2
  ; CHECK-M0: bl ___sync_fetch_and_umax_2
  %3 = atomicrmw umax i16* %val, i16 0 monotonic
  store i16 %3, i16* %old
  ret void
}
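; 8-bit atomic umin/umax: expanded inline on ARMv7/Thumb2, lowered to the
; __sync_fetch_and_*_1 library calls on Thumb1 and Cortex-M0.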
define void @func3() nounwind {
entry:
  %val = alloca i8
  %old = alloca i8
  store i8 31, i8* %val
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_1
  ; CHECK-M0: bl ___sync_fetch_and_umin_1
  %0 = atomicrmw umin i8* %val, i8 16 monotonic
  store i8 %0, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umin_1
  ; CHECK-M0: bl ___sync_fetch_and_umin_1
  %uneg = sub i8 0, 1
  %1 = atomicrmw umin i8* %val, i8 %uneg monotonic
  store i8 %1, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_1
  ; CHECK-M0: bl ___sync_fetch_and_umax_1
  %2 = atomicrmw umax i8* %val, i8 1 monotonic
  store i8 %2, i8* %old
  ; CHECK: ldrex
  ; CHECK: cmp
  ; CHECK: strex
  ; CHECK-T1: blx ___sync_fetch_and_umax_1
  ; CHECK-M0: bl ___sync_fetch_and_umax_1
  %3 = atomicrmw umax i8* %val, i8 0 monotonic
  store i8 %3, i8* %old
  ret void
}
; CHECK: func4
; This function should not need to use callee-saved registers.
; rdar://problem/12203728
; CHECK-NOT: r4
define i32 @func4(i32* %p) nounwind optsize ssp {
entry:
  %0 = atomicrmw add i32* %p, i32 1 monotonic
  ret i32 %0
}
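; A cmpxchg with seq_cst success ordering and monotonic failure ordering still
; needs a dmb barrier both before and after the ldrex/strex loop.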
define i32 @test_cmpxchg_fail_order(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order:
  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new seq_cst monotonic
  %oldval = extractvalue { i32, i1 } %pair, 0
  ; CHECK: dmb ish
  ; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
  ; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
  ; CHECK: cmp [[OLDVAL]], r1
  ; CHECK: bxne lr
  ; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
  ; CHECK: cmp [[SUCCESS]], #0
  ; CHECK: bne [[LOOP_BB]]
  ; CHECK: dmb ish
  ; CHECK: bx lr
  ret i32 %oldval
}
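; With acquire ordering on both success and failure, no barrier is needed
; before the ldrex/strex loop; a single dmb after the loop suffices.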
define i32 @test_cmpxchg_fail_order1(i32 *%addr, i32 %desired, i32 %new) {
; CHECK-LABEL: test_cmpxchg_fail_order1:
  %pair = cmpxchg i32* %addr, i32 %desired, i32 %new acquire acquire
  %oldval = extractvalue { i32, i1 } %pair, 0
  ; CHECK-NOT: dmb ish
  ; CHECK: [[LOOP_BB:\.?LBB[0-9]+_1]]:
  ; CHECK: ldrex [[OLDVAL:r[0-9]+]], [r[[ADDR:[0-9]+]]]
  ; CHECK: cmp [[OLDVAL]], r1
  ; CHECK: bne [[END_BB:\.?LBB[0-9]+_[0-9]+]]
  ; CHECK: strex [[SUCCESS:r[0-9]+]], r2, [r[[ADDR]]]
  ; CHECK: cmp [[SUCCESS]], #0
  ; CHECK: bne [[LOOP_BB]]
  ; CHECK: [[END_BB]]:
  ; CHECK: dmb ish
  ; CHECK: bx lr
  ret i32 %oldval
}