Use C++11 atomics for ticket locks implementation

This patch replaces the use of compiler builtin atomics with
C++11 atomics in the ticket lock implementation. Ticket locks
are used in critical places of the runtime, e.g. in the tasking
mechanism.

The main reason this change was introduced is a problem with
the work stealing function on the ARM architecture, which
suffered from a nasty race condition. The root cause turned out
to lie in the way ticket locks were implemented; switching from
compiler builtins to C++11 atomics solves the problem.
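
For context, a ticket lock hands each arriving thread a monotonically
increasing ticket and admits threads in ticket order. A minimal
standalone sketch of that scheme with C++11 atomics, using the same
memory orderings the patch uses (an illustration only, not the
runtime's code; all names here are hypothetical):

    #include <atomic>

    // Minimal ticket lock sketch (hypothetical names, illustration only).
    struct ticket_lock {
        std::atomic<unsigned> next_ticket{0U};  // ticket for the next arrival
        std::atomic<unsigned> now_serving{0U};  // ticket allowed to run now

        void lock() {
            // Taking a ticket needs no ordering by itself; the acquire
            // load below synchronizes with the release in unlock().
            unsigned my = next_ticket.fetch_add(1U, std::memory_order_relaxed);
            while (now_serving.load(std::memory_order_acquire) != my) {
                // spin; the runtime yields/pauses here instead of burning CPU
            }
        }

        void unlock() {
            // Release, so the next owner's acquire load sees our writes.
            now_serving.fetch_add(1U, std::memory_order_release);
        }
    };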

Two assertions were added to kmp_tasking.c; they are useful for
detecting early symptoms of work stealing going wrong, which was
among the possible outcomes of the race condition.
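
The added checks are plain non-null assertions on the task descriptor
taken from the victim's deque (visible in the kmp_tasking.c hunk at
the end of this page):

    KMP_ASSERT(taskdata);  // a stolen slot must hold a task; failing
                           // here exposes broken deque bookkeeping early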

Differential Revision: http://reviews.llvm.org/D19878

llvm-svn: 271324
Author: Paul Osmialowski
Date:   2016-05-31 20:20:32 +00:00
Parent: 42b0c352c1
Commit: f7cc6affdb

6 changed files with 139 additions and 82 deletions


@@ -3075,6 +3075,7 @@ extern kmp_uint32 __kmp_lt_4( kmp_uint32 value, kmp_uint32 checker );
extern kmp_uint32 __kmp_ge_4( kmp_uint32 value, kmp_uint32 checker );
extern kmp_uint32 __kmp_le_4( kmp_uint32 value, kmp_uint32 checker );
extern kmp_uint32 __kmp_wait_yield_4( kmp_uint32 volatile * spinner, kmp_uint32 checker, kmp_uint32 (*pred) (kmp_uint32, kmp_uint32), void * obj );
extern void __kmp_wait_yield_4_ptr( void * spinner, kmp_uint32 checker, kmp_uint32 (* pred)( void *, kmp_uint32 ), void * obj );
class kmp_flag_32;
class kmp_flag_64;


@@ -2570,6 +2570,32 @@ __kmp_wait_yield_4(volatile kmp_uint32 * spinner,
return r;
}
void
__kmp_wait_yield_4_ptr(void *spinner,
                       kmp_uint32 checker,
                       kmp_uint32 (*pred)( void *, kmp_uint32 ),
                       void *obj // Higher-level synchronization object, or NULL.
)
{
    // note: we may not belong to a team at this point
    register void *spin = spinner;
    register kmp_uint32 check = checker;
    register kmp_uint32 spins;
    register kmp_uint32 (*f) ( void *, kmp_uint32 ) = pred;

    KMP_FSYNC_SPIN_INIT( obj, spin );
    KMP_INIT_YIELD( spins );

    // main wait spin loop
    while ( !f( spin, check ) ) {
        KMP_FSYNC_SPIN_PREPARE( obj );
        /* if we have waited a bit, or are oversubscribed, yield */
        /* pause is in the following code */
        KMP_YIELD( TCR_4( __kmp_nth ) > __kmp_avail_proc );
        KMP_YIELD_SPIN( spins );
    }
    KMP_FSYNC_SPIN_ACQUIRED( obj );
}
} // extern "C"
#ifdef KMP_GOMP_COMPAT
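
The pointer-taking variant above is needed because now_serving becomes
a std::atomic<unsigned> in the lock structure below, so the existing
waiter over volatile kmp_uint32 * no longer matches. The lock code
reaches it through the KMP_WAIT_YIELD_PTR macro, as in the acquire
path shown further down:

    /* spin until the predicate sees now_serving == my_ticket */
    KMP_WAIT_YIELD_PTR( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );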


@@ -14,6 +14,7 @@
#include <stddef.h>
#include <atomic>
#include "kmp.h"
#include "kmp_itt.h"
@@ -719,47 +720,36 @@ __kmp_destroy_nested_futex_lock_with_checks( kmp_futex_lock_t *lck )
static kmp_int32
__kmp_get_ticket_lock_owner( kmp_ticket_lock_t *lck )
{
return TCR_4( lck->lk.owner_id ) - 1;
return std::atomic_load_explicit( &lck->lk.owner_id, std::memory_order_relaxed ) - 1;
}
static inline bool
__kmp_is_ticket_lock_nestable( kmp_ticket_lock_t *lck )
{
return lck->lk.depth_locked != -1;
return std::atomic_load_explicit( &lck->lk.depth_locked, std::memory_order_relaxed ) != -1;
}
static kmp_uint32
__kmp_bakery_check(kmp_uint32 value, kmp_uint32 checker)
__kmp_bakery_check( void *now_serving, kmp_uint32 my_ticket )
{
register kmp_uint32 pause;
if (value == checker) {
return TRUE;
}
for (pause = checker - value; pause != 0; --pause);
return FALSE;
return std::atomic_load_explicit( (std::atomic<unsigned> *)now_serving, std::memory_order_acquire ) == my_ticket;
}
__forceinline static int
__kmp_acquire_ticket_lock_timed_template( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
kmp_uint32 my_ticket;
KMP_MB();
my_ticket = KMP_TEST_THEN_INC32( (kmp_int32 *) &lck->lk.next_ticket );
kmp_uint32 my_ticket = std::atomic_fetch_add_explicit( &lck->lk.next_ticket, 1U, std::memory_order_relaxed );
#ifdef USE_LOCK_PROFILE
if ( TCR_4( lck->lk.now_serving ) != my_ticket )
if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) != my_ticket )
__kmp_printf( "LOCK CONTENTION: %p\n", lck );
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */
if ( TCR_4( lck->lk.now_serving ) == my_ticket ) {
KMP_FSYNC_ACQUIRED(lck);
if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_acquire ) == my_ticket ) {
return KMP_LOCK_ACQUIRED_FIRST;
}
KMP_WAIT_YIELD( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );
KMP_FSYNC_ACQUIRED(lck);
KMP_WAIT_YIELD_PTR( &lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck );
return KMP_LOCK_ACQUIRED_FIRST;
}
@@ -773,7 +763,11 @@ static int
__kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_set_lock";
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -785,19 +779,19 @@ __kmp_acquire_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
__kmp_acquire_ticket_lock( lck, gtid );
lck->lk.owner_id = gtid + 1;
std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_FIRST;
}
int
__kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
kmp_uint32 my_ticket = TCR_4( lck->lk.next_ticket );
if ( TCR_4( lck->lk.now_serving ) == my_ticket ) {
kmp_uint32 my_ticket = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed );
if ( std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed ) == my_ticket ) {
kmp_uint32 next_ticket = my_ticket + 1;
if ( KMP_COMPARE_AND_STORE_ACQ32( (kmp_int32 *) &lck->lk.next_ticket,
my_ticket, next_ticket ) ) {
KMP_FSYNC_ACQUIRED( lck );
if ( std::atomic_compare_exchange_strong_explicit( &lck->lk.next_ticket,
&my_ticket, next_ticket, std::memory_order_acquire, std::memory_order_acquire )) {
return TRUE;
}
}
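
A subtlety of the new test path: on failure,
std::atomic_compare_exchange_strong_explicit writes the value it
actually observed back into my_ticket (which is why the local copy is
passed by address), and in C++11 the failure ordering must be no
stronger than the success ordering. A standalone illustration of this
behaviour (hypothetical example, not part of the patch):

    #include <atomic>
    #include <cassert>

    int main() {
        std::atomic<unsigned> ticket{5U};
        unsigned expected = 5U;
        // Succeeds: ticket becomes 6 and 'expected' is left untouched.
        bool ok = std::atomic_compare_exchange_strong_explicit(
            &ticket, &expected, 6U,
            std::memory_order_acquire, std::memory_order_acquire);
        assert(ok && expected == 5U);
        expected = 5U;
        // Fails: ticket already holds 6, so 'expected' is rewritten to 6.
        ok = std::atomic_compare_exchange_strong_explicit(
            &ticket, &expected, 7U,
            std::memory_order_acquire, std::memory_order_acquire);
        assert(!ok && expected == 6U);
        return 0;
    }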
@@ -808,7 +802,11 @@ static int
__kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_test_lock";
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -818,7 +816,7 @@ __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
int retval = __kmp_test_ticket_lock( lck, gtid );
if ( retval ) {
lck->lk.owner_id = gtid + 1;
std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
}
return retval;
}
@@ -826,16 +824,9 @@ __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
int
__kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
kmp_uint32 distance;
kmp_uint32 distance = std::atomic_load_explicit( &lck->lk.next_ticket, std::memory_order_relaxed ) - std::atomic_load_explicit( &lck->lk.now_serving, std::memory_order_relaxed );
KMP_MB(); /* Flush all pending memory write invalidates. */
KMP_FSYNC_RELEASING(lck);
distance = ( TCR_4( lck->lk.next_ticket ) - TCR_4( lck->lk.now_serving ) );
KMP_ST_REL32( &(lck->lk.now_serving), lck->lk.now_serving + 1 );
KMP_MB(); /* Flush all pending memory write invalidates. */
std::atomic_fetch_add_explicit( &lck->lk.now_serving, 1U, std::memory_order_release );
KMP_YIELD( distance
> (kmp_uint32) (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) );
@@ -846,8 +837,11 @@ static int
__kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_lock";
KMP_MB(); /* in case another processor initialized lock */
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -860,7 +854,7 @@ __kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
&& ( __kmp_get_ticket_lock_owner( lck ) != gtid ) ) {
KMP_FATAL( LockUnsettingSetByAnother, func );
}
lck->lk.owner_id = 0;
std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
return __kmp_release_ticket_lock( lck, gtid );
}
@@ -868,11 +862,12 @@ void
__kmp_init_ticket_lock( kmp_ticket_lock_t * lck )
{
lck->lk.location = NULL;
TCW_4( lck->lk.next_ticket, 0 );
TCW_4( lck->lk.now_serving, 0 );
lck->lk.owner_id = 0; // no thread owns the lock.
lck->lk.depth_locked = -1; // -1 => not a nested lock.
lck->lk.initialized = (kmp_ticket_lock *)lck;
lck->lk.self = lck;
std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed ); // no thread owns the lock.
std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ); // -1 => not a nested lock.
std::atomic_store_explicit( &lck->lk.initialized, true, std::memory_order_release );
}
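
Note the ordering in the initializer above: every field is stored
relaxed first and 'initialized' is published last with a release
store. Pairing that release store with an acquire load is the
standard publication pattern; a compressed sketch (hypothetical
names, not the runtime's code):

    #include <atomic>

    std::atomic<bool> ready{false};
    int payload;  // plain data published through 'ready'

    void publisher() {
        payload = 42;                                  // ordinary write...
        ready.store(true, std::memory_order_release);  // ...published last
    }

    bool consumer() {
        // Acquire pairs with the release store, making 'payload' visible.
        if (ready.load(std::memory_order_acquire))
            return payload == 42;  // true once 'ready' is observed
        return false;
    }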
static void
@@ -884,19 +879,24 @@ __kmp_init_ticket_lock_with_checks( kmp_ticket_lock_t * lck )
void
__kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck )
{
lck->lk.initialized = NULL;
lck->lk.location = NULL;
lck->lk.next_ticket = 0;
lck->lk.now_serving = 0;
lck->lk.owner_id = 0;
lck->lk.depth_locked = -1;
std::atomic_store_explicit( &lck->lk.initialized, false, std::memory_order_release );
lck->lk.self = NULL;
lck->lk.location = NULL;
std::atomic_store_explicit( &lck->lk.next_ticket, 0U, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.now_serving, 0U, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed );
}
static void
__kmp_destroy_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
{
char const * const func = "omp_destroy_lock";
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -919,15 +919,13 @@ __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_DEBUG_ASSERT( gtid >= 0 );
if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
lck->lk.depth_locked += 1;
std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_NEXT;
}
else {
__kmp_acquire_ticket_lock_timed_template( lck, gtid );
KMP_MB();
lck->lk.depth_locked = 1;
KMP_MB();
lck->lk.owner_id = gtid + 1;
std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
return KMP_LOCK_ACQUIRED_FIRST;
}
}
@@ -936,7 +934,11 @@ static int
__kmp_acquire_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_set_nest_lock";
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -953,16 +955,15 @@ __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
KMP_DEBUG_ASSERT( gtid >= 0 );
if ( __kmp_get_ticket_lock_owner( lck ) == gtid ) {
retval = ++lck->lk.depth_locked;
retval = std::atomic_fetch_add_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed ) + 1;
}
else if ( !__kmp_test_ticket_lock( lck, gtid ) ) {
retval = 0;
}
else {
KMP_MB();
retval = lck->lk.depth_locked = 1;
KMP_MB();
lck->lk.owner_id = gtid + 1;
std::atomic_store_explicit( &lck->lk.depth_locked, 1, std::memory_order_relaxed );
std::atomic_store_explicit( &lck->lk.owner_id, gtid + 1, std::memory_order_relaxed );
retval = 1;
}
return retval;
}
@@ -972,7 +973,11 @@ __kmp_test_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck,
kmp_int32 gtid )
{
char const * const func = "omp_test_nest_lock";
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -986,10 +991,8 @@ __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
KMP_DEBUG_ASSERT( gtid >= 0 );
KMP_MB();
if ( --(lck->lk.depth_locked) == 0 ) {
KMP_MB();
lck->lk.owner_id = 0;
if ( ( std::atomic_fetch_add_explicit( &lck->lk.depth_locked, -1, std::memory_order_relaxed ) - 1 ) == 0 ) {
std::atomic_store_explicit( &lck->lk.owner_id, 0, std::memory_order_relaxed );
__kmp_release_ticket_lock( lck, gtid );
return KMP_LOCK_RELEASED;
}
@@ -1000,8 +1003,11 @@ static int
__kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
{
char const * const func = "omp_unset_nest_lock";
KMP_MB(); /* in case another processor initialized lock */
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -1020,7 +1026,7 @@ void
__kmp_init_nested_ticket_lock( kmp_ticket_lock_t * lck )
{
__kmp_init_ticket_lock( lck );
lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed ); // >= 0 for nestable locks, -1 for simple locks
}
static void
@@ -1033,14 +1039,18 @@ void
__kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck )
{
__kmp_destroy_ticket_lock( lck );
lck->lk.depth_locked = 0;
std::atomic_store_explicit( &lck->lk.depth_locked, 0, std::memory_order_relaxed );
}
static void
__kmp_destroy_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
{
char const * const func = "omp_destroy_nest_lock";
if ( lck->lk.initialized != lck ) {
if ( ! std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( lck->lk.self != lck ) {
KMP_FATAL( LockIsUninitialized, func );
}
if ( ! __kmp_is_ticket_lock_nestable( lck ) ) {
@@ -1060,7 +1070,7 @@ __kmp_destroy_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck )
static int
__kmp_is_ticket_lock_initialized( kmp_ticket_lock_t *lck )
{
return lck == lck->lk.initialized;
return std::atomic_load_explicit( &lck->lk.initialized, std::memory_order_relaxed ) && ( lck->lk.self == lck);
}
static const ident_t *


@@ -23,6 +23,8 @@
#include "kmp_debug.h"
#ifdef __cplusplus
#include <atomic>
extern "C" {
#endif // __cplusplus
@@ -233,17 +235,26 @@ extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );
// Ticket locks.
// ----------------------------------------------------------------------------
#ifdef __cplusplus
struct kmp_base_ticket_lock {
// `initialized' must be the first entry in the lock data structure!
volatile union kmp_ticket_lock * initialized; // points to the lock union if in initialized state
ident_t const * location; // Source code location of omp_init_lock().
volatile kmp_uint32 next_ticket; // ticket number to give to next thread which acquires
volatile kmp_uint32 now_serving; // ticket number for thread which holds the lock
volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
kmp_int32 depth_locked; // depth locked, for nested locks only
kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
std::atomic<bool> initialized;
volatile union kmp_ticket_lock *self; // points to the lock union
ident_t const * location; // Source code location of omp_init_lock().
std::atomic<unsigned> next_ticket; // ticket number to give to next thread which acquires
std::atomic<unsigned> now_serving; // ticket number for thread which holds the lock
std::atomic<int> owner_id; // (gtid+1) of owning thread, 0 if unlocked
std::atomic<int> depth_locked; // depth locked, for nested locks only
kmp_lock_flags_t flags; // lock specifics, e.g. critical section lock
};
#else // __cplusplus
struct kmp_base_ticket_lock;
#endif // !__cplusplus
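
The old definition carried the comment that `initialized' must be the
first entry in the lock structure; the new layout keeps the atomic
flag first. A hypothetical compile-time guard for that invariant (not
part of this patch, and assuming the first-field requirement still
applies to the C++11 layout) could read:

    #include <cstddef>  // offsetof

    // Hypothetical check; relies on kmp_base_ticket_lock being standard-layout.
    static_assert(offsetof(struct kmp_base_ticket_lock, initialized) == 0,
                  "initialized must stay the first field of the ticket lock");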
typedef struct kmp_base_ticket_lock kmp_base_ticket_lock_t;
union KMP_ALIGN_CACHE kmp_ticket_lock {
@@ -260,7 +271,13 @@ typedef union kmp_ticket_lock kmp_ticket_lock_t;
// kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );
// Note the macro argument. It is important to make var properly initialized.
//
#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { (kmp_ticket_lock_t *) & (lock), NULL, 0, 0, 0, -1 } }
#define KMP_TICKET_LOCK_INITIALIZER( lock ) { { ATOMIC_VAR_INIT(true), \
                                                &(lock),               \
                                                NULL,                  \
                                                ATOMIC_VAR_INIT(0U),   \
                                                ATOMIC_VAR_INIT(0U),   \
                                                ATOMIC_VAR_INIT(0),    \
                                                ATOMIC_VAR_INIT(-1) } }
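
Usage is unchanged: as the comment above the macro notes, it takes
the variable being defined so that the embedded self pointer can
refer back to the lock union:

    kmp_ticket_lock_t xlock = KMP_TICKET_LOCK_INITIALIZER( xlock );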
extern int __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );


@@ -650,6 +650,7 @@ typedef void (*microtask_t)( int *gtid, int *npr, ... );
#endif
#define KMP_WAIT_YIELD __kmp_wait_yield_4
#define KMP_WAIT_YIELD_PTR __kmp_wait_yield_4_ptr
#define KMP_EQ __kmp_eq_4
#define KMP_NEQ __kmp_neq_4
#define KMP_LT __kmp_lt_4


@@ -1734,12 +1734,14 @@ __kmp_steal_task( kmp_info_t *victim, kmp_int32 gtid, kmp_task_team_t *task_team
if ( !is_constrained ) {
taskdata = victim_td -> td.td_deque[ victim_td -> td.td_deque_head ];
KMP_ASSERT(taskdata);
// Bump head pointer and Wrap.
victim_td -> td.td_deque_head = ( victim_td -> td.td_deque_head + 1 ) & TASK_DEQUE_MASK(victim_td->td);
} else {
// While we have postponed tasks let's steal from tail of the deque (smaller tasks)
kmp_int32 tail = ( victim_td -> td.td_deque_tail - 1 ) & TASK_DEQUE_MASK(victim_td->td); // Wrap index.
taskdata = victim_td -> td.td_deque[ tail ];
KMP_ASSERT(taskdata);
// we need to check if the candidate obeys task scheduling constraint:
// only child of current task can be scheduled
kmp_taskdata_t * current = __kmp_threads[ gtid ]->th.th_current_task;
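
The head/tail arithmetic in this hunk relies on the task deque size
being a power of two, so TASK_DEQUE_MASK reduces the wrap-around to a
single AND. A tiny illustration with hypothetical constants:

    // With a power-of-two deque size, '& mask' wraps indices cheaply.
    enum { DEQUE_SIZE = 256, DEQUE_MASK = DEQUE_SIZE - 1 };  // hypothetical

    unsigned next_head(unsigned head) {
        return (head + 1u) & DEQUE_MASK;  // 255 -> 0, otherwise head + 1
    }

    unsigned prev_tail(unsigned tail) {
        return (tail - 1u) & DEQUE_MASK;  // 0 -> 255 (unsigned wrap + mask)
    }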