This is a preparation patch that exposes the actual release status of nested locks so it can be reported to OMPT.
llvm-svn: 236114
commit 8d09facaf2
parent ba742f58af
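The diff below converts the lock release routines from void to int so that every call reports whether the lock was actually released (KMP_LOCK_RELEASED) or, for a nested lock, merely decremented and still held (KMP_LOCK_STILL_HELD). A minimal sketch of that pattern, using a hypothetical demo_nest_lock_t rather than the runtime's real lock types, and the KMP_LOCK_* values the header change below introduces:

/* Sketch only: demo_nest_lock_t is a stand-in, not a kmp_* type. */
#define KMP_LOCK_RELEASED   1
#define KMP_LOCK_STILL_HELD 0

typedef struct { int depth_locked; } demo_nest_lock_t;

static int
demo_release_nested_lock( demo_nest_lock_t *lck )
{
    if ( --(lck->depth_locked) == 0 ) {
        /* innermost unset: the underlying lock really is released */
        return KMP_LOCK_RELEASED;
    }
    return KMP_LOCK_STILL_HELD;  /* outer nesting levels still hold it */
}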
@@ -171,7 +171,7 @@ __kmp_test_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
     return __kmp_test_tas_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -182,9 +182,10 @@ __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
     KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
       __kmp_xproc ) );
+    return KMP_LOCK_RELEASED;
 }
 
-static void
+static int
 __kmp_release_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_lock";
@@ -200,7 +201,7 @@ __kmp_release_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
       && ( __kmp_get_tas_lock_owner( lck ) != gtid ) ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_tas_lock( lck, gtid );
+    return __kmp_release_tas_lock( lck, gtid );
 }
 
 void
@@ -294,7 +295,7 @@ __kmp_test_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
     return __kmp_test_nested_tas_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -302,10 +303,12 @@ __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid )
     KMP_MB();
     if ( --(lck->lk.depth_locked) == 0 ) {
         __kmp_release_tas_lock( lck, gtid );
+        return KMP_LOCK_RELEASED;
     }
+    return KMP_LOCK_STILL_HELD;
 }
 
-static void
+static int
 __kmp_release_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_nest_lock";
@@ -319,7 +322,7 @@ __kmp_release_nested_tas_lock_with_checks( kmp_tas_lock_t *lck, kmp_int32 gtid )
     if ( __kmp_get_tas_lock_owner( lck ) != gtid ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_nested_tas_lock( lck, gtid );
+    return __kmp_release_nested_tas_lock( lck, gtid );
 }
 
 void
@@ -497,7 +500,7 @@ __kmp_test_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
     return __kmp_test_futex_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     KMP_MB(); /* Flush all pending memory write invalidates. */
@@ -525,9 +528,10 @@ __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
     KMP_YIELD( TCR_4( __kmp_nth ) > ( __kmp_avail_proc ? __kmp_avail_proc :
       __kmp_xproc ) );
+    return KMP_LOCK_RELEASED;
 }
 
-static void
+static int
 __kmp_release_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_lock";
@@ -543,7 +547,7 @@ __kmp_release_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
       && ( __kmp_get_futex_lock_owner( lck ) != gtid ) ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_futex_lock( lck, gtid );
+    return __kmp_release_futex_lock( lck, gtid );
 }
 
 void
@@ -637,7 +641,7 @@ __kmp_test_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid
     return __kmp_test_nested_futex_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -645,10 +649,12 @@ __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid )
     KMP_MB();
     if ( --(lck->lk.depth_locked) == 0 ) {
         __kmp_release_futex_lock( lck, gtid );
+        return KMP_LOCK_RELEASED;
    }
+    return KMP_LOCK_STILL_HELD;
 }
 
-static void
+static int
 __kmp_release_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_nest_lock";
@@ -662,7 +668,7 @@ __kmp_release_nested_futex_lock_with_checks( kmp_futex_lock_t *lck, kmp_int32 gt
     if ( __kmp_get_futex_lock_owner( lck ) != gtid ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_nested_futex_lock( lck, gtid );
+    return __kmp_release_nested_futex_lock( lck, gtid );
 }
 
 void
@@ -809,7 +815,7 @@ __kmp_test_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
     return retval;
 }
 
-void
+int
 __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     kmp_uint32 distance;
@@ -825,9 +831,10 @@ __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
     KMP_YIELD( distance
       > (kmp_uint32) (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc) );
+    return KMP_LOCK_RELEASED;
 }
 
-static void
+static int
 __kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_lock";
@@ -846,7 +853,7 @@ __kmp_release_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
     lck->lk.owner_id = 0;
-    __kmp_release_ticket_lock( lck, gtid );
+    return __kmp_release_ticket_lock( lck, gtid );
 }
 
 void
@@ -964,7 +971,7 @@ __kmp_test_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck,
     return __kmp_test_nested_ticket_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -974,10 +981,12 @@ __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid )
         KMP_MB();
         lck->lk.owner_id = 0;
         __kmp_release_ticket_lock( lck, gtid );
+        return KMP_LOCK_RELEASED;
     }
+    return KMP_LOCK_STILL_HELD;
 }
 
-static void
+static int
 __kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_nest_lock";
@@ -994,7 +1003,7 @@ __kmp_release_nested_ticket_lock_with_checks( kmp_ticket_lock_t *lck, kmp_int32
     if ( __kmp_get_ticket_lock_owner( lck ) != gtid ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_nested_ticket_lock( lck, gtid );
+    return __kmp_release_nested_ticket_lock( lck, gtid );
 }
 
 void
@@ -1435,7 +1444,7 @@ __kmp_test_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
     return retval;
 }
 
-void
+int
 __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 {
     register kmp_info_t *this_thr;
@@ -1482,7 +1491,7 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 #ifdef DEBUG_QUEUING_LOCKS
                 TRACE_LOCK_HT( gtid+1, "rel exit: ", 0, 0 );
 #endif
-                return;
+                return KMP_LOCK_RELEASED;
             }
             dequeued = FALSE;
@@ -1556,7 +1565,7 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 #ifdef DEBUG_QUEUING_LOCKS
             TRACE_LOCK( gtid+1, "rel exit 2" );
 #endif
-            return;
+            return KMP_LOCK_RELEASED;
         }
         /* KMP_CPU_PAUSE( ); don't want to make releasing thread hold up acquiring threads */
@@ -1566,9 +1575,10 @@ __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 
     } /* while */
     KMP_ASSERT2( 0, "should not get here" );
+    return KMP_LOCK_RELEASED;
 }
 
-static void
+static int
 __kmp_release_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
   kmp_int32 gtid )
 {
@@ -1587,7 +1597,7 @@ __kmp_release_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
     lck->lk.owner_id = 0;
-    __kmp_release_queuing_lock( lck, gtid );
+    return __kmp_release_queuing_lock( lck, gtid );
 }
 
 void
@@ -1711,7 +1721,7 @@ __kmp_test_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck,
     return __kmp_test_nested_queuing_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -1721,10 +1731,12 @@ __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid )
         KMP_MB();
         lck->lk.owner_id = 0;
        __kmp_release_queuing_lock( lck, gtid );
+        return KMP_LOCK_RELEASED;
     }
+    return KMP_LOCK_STILL_HELD;
 }
 
-static void
+static int
 __kmp_release_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_nest_lock";
@@ -1741,7 +1753,7 @@ __kmp_release_nested_queuing_lock_with_checks( kmp_queuing_lock_t *lck, kmp_int3
     if ( __kmp_get_queuing_lock_owner( lck ) != gtid ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_nested_queuing_lock( lck, gtid );
+    return __kmp_release_nested_queuing_lock( lck, gtid );
 }
 
 void
@@ -2312,7 +2324,7 @@ __kmp_acquire_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gti
     lck->lk.qlk.owner_id = gtid + 1;
 }
 
-static void
+static int
 __kmp_release_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
 {
     if ( __kmp_is_unlocked_queuing_lock( GET_QLK_PTR(lck) ) )
@@ -2327,9 +2339,10 @@ __kmp_release_adaptive_lock( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
         // so should use the underlying lock's release scheme.
         __kmp_release_queuing_lock( GET_QLK_PTR(lck), gtid );
     }
+    return KMP_LOCK_RELEASED;
 }
 
-static void
+static int
 __kmp_release_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_lock";
@@ -2345,6 +2358,7 @@ __kmp_release_adaptive_lock_with_checks( kmp_adaptive_lock_t *lck, kmp_int32 gti
     }
     lck->lk.qlk.owner_id = 0;
     __kmp_release_adaptive_lock( lck, gtid );
+    return KMP_LOCK_RELEASED;
 }
 
 static void
@@ -2655,7 +2669,7 @@ __kmp_test_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
     return retval;
 }
 
-void
+int
 __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     //
@@ -2672,9 +2686,10 @@ __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
       ticket - 1, lck));
     KMP_FSYNC_RELEASING(lck);
     KMP_ST_REL64(&(polls[ticket & mask].poll), ticket); // volatile store
+    return KMP_LOCK_RELEASED;
 }
 
-static void
+static int
 __kmp_release_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_lock";
@@ -2693,7 +2708,7 @@ __kmp_release_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
     lck->lk.owner_id = 0;
-    __kmp_release_drdpa_lock( lck, gtid );
+    return __kmp_release_drdpa_lock( lck, gtid );
 }
 
 void
@@ -2829,7 +2844,7 @@ __kmp_test_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid
     return __kmp_test_nested_drdpa_lock( lck, gtid );
 }
 
-void
+int
 __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( gtid >= 0 );
@@ -2839,10 +2854,12 @@ __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
         KMP_MB();
         lck->lk.owner_id = 0;
         __kmp_release_drdpa_lock( lck, gtid );
+        return KMP_LOCK_RELEASED;
     }
+    return KMP_LOCK_STILL_HELD;
 }
 
-static void
+static int
 __kmp_release_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gtid )
 {
     char const * const func = "omp_unset_nest_lock";
@@ -2859,7 +2876,7 @@ __kmp_release_nested_drdpa_lock_with_checks( kmp_drdpa_lock_t *lck, kmp_int32 gt
     if ( __kmp_get_drdpa_lock_owner( lck ) != gtid ) {
         KMP_FATAL( LockUnsettingSetByAnother, func );
     }
-    __kmp_release_nested_drdpa_lock( lck, gtid );
+    return __kmp_release_nested_drdpa_lock( lck, gtid );
 }
 
 void
@@ -3519,14 +3536,14 @@ kmp_int32 ( *__kmp_get_user_lock_owner_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_acquire_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 
 int ( *__kmp_test_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-void ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 void ( *__kmp_init_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_destroy_user_lock_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_destroy_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_acquire_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 
 int ( *__kmp_test_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
-void ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
+int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid ) = NULL;
 void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
 void ( *__kmp_destroy_nested_user_lock_with_checks_ )( kmp_user_lock_p lck ) = NULL;
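The last hunk above also updates the indirect dispatch table: once the concrete release routines return a status, the function-pointer slots and the inline wrappers that call through them must forward it (the header change below makes the matching declaration and wrapper adjustments). A hedged sketch of that forwarding, with stand-in typedefs instead of the runtime's kmp_user_lock_p and kmp_int32:

#include <stddef.h>

/* Stand-ins for the sketch only, not the runtime's real types. */
typedef void *demo_user_lock_p;
typedef int   demo_int32;

/* The slot now stores an int-returning release function. */
static int ( *demo_release_user_lock_with_checks_ )( demo_user_lock_p, demo_int32 ) = NULL;

/* The inline wrapper simply forwards the underlying lock's status. */
static inline int
demo_release_user_lock_with_checks( demo_user_lock_p lck, demo_int32 gtid )
{
    return ( *demo_release_user_lock_with_checks_ )( lck, gtid );
}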
@@ -161,16 +161,19 @@ typedef union kmp_tas_lock kmp_tas_lock_t;
 
 extern void __kmp_acquire_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_tas_lock( kmp_tas_lock_t *lck );
 extern void __kmp_destroy_tas_lock( kmp_tas_lock_t *lck );
 
 extern void __kmp_acquire_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_nested_tas_lock( kmp_tas_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_tas_lock( kmp_tas_lock_t *lck );
 extern void __kmp_destroy_nested_tas_lock( kmp_tas_lock_t *lck );
 
+#define KMP_LOCK_RELEASED 1
+#define KMP_LOCK_STILL_HELD 0
+
 
 #if KMP_OS_LINUX && (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
@@ -212,13 +215,13 @@ typedef union kmp_futex_lock kmp_futex_lock_t;
 
 extern void __kmp_acquire_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_futex_lock( kmp_futex_lock_t *lck );
 extern void __kmp_destroy_futex_lock( kmp_futex_lock_t *lck );
 
 extern void __kmp_acquire_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_nested_futex_lock( kmp_futex_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_futex_lock( kmp_futex_lock_t *lck );
 extern void __kmp_destroy_nested_futex_lock( kmp_futex_lock_t *lck );
@@ -261,13 +264,13 @@ typedef union kmp_ticket_lock kmp_ticket_lock_t;
 extern void __kmp_acquire_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_ticket_lock_with_cheks( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_ticket_lock( kmp_ticket_lock_t *lck );
 extern void __kmp_destroy_ticket_lock( kmp_ticket_lock_t *lck );
 
 extern void __kmp_acquire_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_nested_ticket_lock( kmp_ticket_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_ticket_lock( kmp_ticket_lock_t *lck );
 extern void __kmp_destroy_nested_ticket_lock( kmp_ticket_lock_t *lck );
@@ -363,13 +366,13 @@ typedef union kmp_queuing_lock kmp_queuing_lock_t;
 
 extern void __kmp_acquire_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_queuing_lock( kmp_queuing_lock_t *lck );
 extern void __kmp_destroy_queuing_lock( kmp_queuing_lock_t *lck );
 
 extern void __kmp_acquire_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_nested_queuing_lock( kmp_queuing_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_queuing_lock( kmp_queuing_lock_t *lck );
 extern void __kmp_destroy_nested_queuing_lock( kmp_queuing_lock_t *lck );
@@ -467,13 +470,13 @@ typedef union kmp_drdpa_lock kmp_drdpa_lock_t;
 
 extern void __kmp_acquire_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_drdpa_lock( kmp_drdpa_lock_t *lck );
 extern void __kmp_destroy_drdpa_lock( kmp_drdpa_lock_t *lck );
 
 extern void __kmp_acquire_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern int __kmp_test_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
-extern void __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
+extern int __kmp_release_nested_drdpa_lock( kmp_drdpa_lock_t *lck, kmp_int32 gtid );
 extern void __kmp_init_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
 extern void __kmp_destroy_nested_drdpa_lock( kmp_drdpa_lock_t *lck );
@@ -716,7 +719,7 @@ __kmp_test_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
 }
 #endif
 
-extern void ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int ( *__kmp_release_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
 
 static inline void
 __kmp_release_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
@@ -848,13 +851,13 @@ __kmp_test_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
 }
 #endif
 
-extern void ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
+extern int ( *__kmp_release_nested_user_lock_with_checks_ )( kmp_user_lock_p lck, kmp_int32 gtid );
 
-static inline void
+static inline int
 __kmp_release_nested_user_lock_with_checks( kmp_user_lock_p lck, kmp_int32 gtid )
 {
     KMP_DEBUG_ASSERT( __kmp_release_nested_user_lock_with_checks_ != NULL );
-    ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
+    return ( *__kmp_release_nested_user_lock_with_checks_ )( lck, gtid );
 }
 
 extern void ( *__kmp_init_nested_user_lock_with_checks_ )( kmp_user_lock_p lck );
@@ -939,7 +942,7 @@ extern void __kmp_set_user_lock_vptrs( kmp_lock_kind_t user_lock_kind );
 #define KMP_BIND_USER_LOCK_TEMPLATE(nest, kind, suffix) { \
     __kmp_acquire##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p, kmp_int32 ) ) \
         __kmp_acquire##nest##kind##_##suffix; \
-    __kmp_release##nest##user_lock_with_checks_ = ( void (*)( kmp_user_lock_p, kmp_int32 ) ) \
+    __kmp_release##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
        __kmp_release##nest##kind##_##suffix; \
     __kmp_test##nest##user_lock_with_checks_ = ( int (*)( kmp_user_lock_p, kmp_int32 ) ) \
        __kmp_test##nest##kind##_##suffix; \