Code cleanup for the runtime without monitor thread
This change removes or disables unnecessary code when the monitor thread is not used.

Patch by Hansang Bae

Differential Revision: https://reviews.llvm.org/D25102

llvm-svn: 283577
commit e1c7c13c3d (parent a1234cf280)
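The whole patch follows one recurring pattern: state, accessor macros, and call sites that exist only for the monitor thread are wrapped in #if KMP_USE_MONITOR, so they disappear entirely from a build without the monitor. Below is a minimal, self-contained sketch of that pattern; KMP_USE_MONITOR is the real build flag, while my_icvs_t and set__bt_intervals here are simplified stand-ins, not the runtime's actual definitions.

/* Minimal sketch of the guard pattern applied throughout this patch. */
#include <stdio.h>

#define KMP_USE_MONITOR 0          /* assume a build without the monitor thread */

typedef struct my_icvs {
    int blocktime;                 /* always present */
#if KMP_USE_MONITOR
    int bt_intervals;              /* compiled in only when the monitor thread exists */
#endif
} my_icvs_t;

#if KMP_USE_MONITOR
#define set__bt_intervals(p, v) ((p)->bt_intervals = (v))
#endif

int main(void) {
    my_icvs_t icvs = { .blocktime = 200 };
#if KMP_USE_MONITOR
    set__bt_intervals(&icvs, 2);   /* monitor-only state is touched only when it exists */
#endif
    printf("blocktime=%d\n", icvs.blocktime);
    return 0;
}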
@@ -1028,8 +1028,6 @@ extern int __kmp_place_num_threads_per_core;
# define KMP_DEFAULT_STKSIZE ((size_t)(1024 * 1024))
#endif

#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))

#define KMP_DEFAULT_MALLOC_POOL_INCR ((size_t) (1024 * 1024))
#define KMP_MIN_MALLOC_POOL_INCR ((size_t) (4 * 1024))
#define KMP_MAX_MALLOC_POOL_INCR (~((size_t)1<<((sizeof(size_t)*(1<<3))-1)))
@@ -1045,12 +1043,16 @@ extern int __kmp_place_num_threads_per_core;
#define KMP_MIN_STKPADDING (0)
#define KMP_MAX_STKPADDING (2 * 1024 * 1024)

#define KMP_MIN_MONITOR_WAKEUPS (1) /* min number of times monitor wakes up per second */
#define KMP_MAX_MONITOR_WAKEUPS (1000) /* maximum number of times monitor can wake up per second */
#define KMP_BLOCKTIME_MULTIPLIER (1000) /* number of blocktime units per second */
#define KMP_MIN_BLOCKTIME (0)
#define KMP_MAX_BLOCKTIME (INT_MAX) /* Must be this for "infinite" setting the work */
#define KMP_DEFAULT_BLOCKTIME (200) /* __kmp_blocktime is in milliseconds */

#if KMP_USE_MONITOR
#define KMP_DEFAULT_MONITOR_STKSIZE ((size_t)(64 * 1024))
#define KMP_MIN_MONITOR_WAKEUPS (1) /* min number of times monitor wakes up per second */
#define KMP_MAX_MONITOR_WAKEUPS (1000) /* maximum number of times monitor can wake up per second */

/* Calculate new number of monitor wakeups for a specific block time based on previous monitor_wakeups */
/* Only allow increasing number of wakeups */
#define KMP_WAKEUPS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
@@ -1063,6 +1065,7 @@ extern int __kmp_place_num_threads_per_core;
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
    ( ( (blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1 ) / \
      (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) )
#endif // KMP_USE_MONITOR

#define KMP_MIN_STATSCOLS 40
#define KMP_MAX_STATSCOLS 4096
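KMP_INTERVALS_FROM_BLOCKTIME is a ceiling division of the blocktime (in milliseconds) by the monitor wakeup period, KMP_BLOCKTIME_MULTIPLIER / monitor_wakeups milliseconds. Below is a small standalone check of the macro exactly as shown above; the constants are copied from this header and the sample inputs are arbitrary.

#include <assert.h>
#include <stdio.h>

/* Copied from the hunk above. */
#define KMP_BLOCKTIME_MULTIPLIER (1000) /* number of blocktime units per second */
#define KMP_INTERVALS_FROM_BLOCKTIME(blocktime, monitor_wakeups) \
    ( ( (blocktime) + (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) - 1 ) / \
      (KMP_BLOCKTIME_MULTIPLIER / (monitor_wakeups)) )

int main(void) {
    /* 10 wakeups/s -> one wakeup every 100 ms; 200 ms of blocktime = 2 intervals. */
    assert(KMP_INTERVALS_FROM_BLOCKTIME(200, 10) == 2);
    /* 1 wakeup/s (KMP_MIN_MONITOR_WAKEUPS) -> 1000 ms period; 200 ms rounds up to 1 interval. */
    assert(KMP_INTERVALS_FROM_BLOCKTIME(200, 1) == 1);
    /* Rounding is always upward: 250 ms at 100 ms per wakeup needs 3 intervals. */
    assert(KMP_INTERVALS_FROM_BLOCKTIME(250, 10) == 3);
    printf("all interval computations check out\n");
    return 0;
}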
@@ -1810,7 +1813,9 @@ typedef struct kmp_internal_control {
    kmp_int8 dynamic; /* internal control for dynamic adjustment of threads (per thread) */
    kmp_int8 bt_set; /* internal control for whether blocktime is explicitly set */
    int blocktime; /* internal control for blocktime */
#if KMP_USE_MONITOR
    int bt_intervals; /* internal control for blocktime intervals */
#endif
    int nproc; /* internal control for #threads for next parallel region (per thread) */
    int max_active_levels; /* internal control for max_active_levels */
    kmp_r_sched_t sched; /* internal control for runtime schedule {sched,chunk} pair */
@@ -2001,7 +2006,9 @@ typedef struct kmp_local {

#define get__blocktime( xteam, xtid ) ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime)
#define get__bt_set( xteam, xtid ) ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set)
#if KMP_USE_MONITOR
#define get__bt_intervals( xteam, xtid ) ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals)
#endif

#define get__nested_2(xteam,xtid) ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.nested)
#define get__dynamic_2(xteam,xtid) ((xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.dynamic)
@@ -2011,8 +2018,10 @@ typedef struct kmp_local {
#define set__blocktime_team( xteam, xtid, xval ) \
    ( ( (xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.blocktime ) = (xval) )

#if KMP_USE_MONITOR
#define set__bt_intervals_team( xteam, xtid, xval ) \
    ( ( (xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_intervals ) = (xval) )
#endif

#define set__bt_set_team( xteam, xtid, xval ) \
    ( ( (xteam)->t.t_threads[(xtid)]->th.th_current_task->td_icvs.bt_set ) = (xval) )
@@ -2380,7 +2389,9 @@ typedef struct KMP_ALIGN_CACHE kmp_base_info {
    /* at the start of a barrier, and the values stored in the team are used */
    /* at points in the code where the team struct is no longer guaranteed */
    /* to exist (from the POV of worker threads). */
#if KMP_USE_MONITOR
    int th_team_bt_intervals;
#endif
    int th_team_bt_set;

@@ -2835,8 +2846,10 @@ extern int __kmp_tp_capacity; /* capacity of __kmp_threads if threadpr
extern int __kmp_tp_cached; /* whether threadprivate cache has been created (__kmpc_threadprivate_cached()) */
extern int __kmp_dflt_nested; /* nested parallelism enabled by default a la OMP_NESTED */
extern int __kmp_dflt_blocktime; /* number of milliseconds to wait before blocking (env setting) */
#if KMP_USE_MONITOR
extern int __kmp_monitor_wakeups;/* number of times monitor wakes up per second */
extern int __kmp_bt_intervals; /* number of monitor timestamp intervals before blocking */
#endif
#ifdef KMP_ADJUST_BLOCKTIME
extern int __kmp_zero_bt; /* whether blocktime has been forced to zero */
#endif /* KMP_ADJUST_BLOCKTIME */

@@ -1108,7 +1108,9 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
       the team struct is not guaranteed to exist. */
    // See note about the corresponding code in __kmp_join_barrier() being performance-critical.
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
#if KMP_USE_MONITOR
        this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
#endif
        this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
    }
@@ -1425,7 +1427,9 @@ __kmp_join_barrier(int gtid)
       down EPCC parallel by 2x. As a workaround, we do not perform the copy if blocktime=infinite,
       since the values are not used by __kmp_wait_template() in that case. */
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
#if KMP_USE_MONITOR
        this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
#endif
        this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
    }
@@ -1612,7 +1616,9 @@ __kmp_fork_barrier(int gtid, int tid)
       access it when the team struct is not guaranteed to exist. */
    // See note about the corresponding code in __kmp_join_barrier() being performance-critical
    if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
#if KMP_USE_MONITOR
        this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
#endif
        this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
    }
} // master

@@ -141,8 +141,10 @@ enum sched_type __kmp_static = kmp_sch_static_greedy; /* default static sched
enum sched_type __kmp_guided = kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
enum sched_type __kmp_auto = kmp_sch_guided_analytical_chunked; /* default auto scheduling method */
int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
#if KMP_USE_MONITOR
int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( KMP_DEFAULT_BLOCKTIME, KMP_MIN_MONITOR_WAKEUPS );
#endif
#ifdef KMP_ADJUST_BLOCKTIME
int __kmp_zero_bt = FALSE;
#endif /* KMP_ADJUST_BLOCKTIME */

@@ -3034,7 +3034,9 @@ __kmp_get_global_icvs( void ) {
    (kmp_int8)__kmp_global.g.g_dynamic, //internal control for dynamic adjustment of threads (per thread)
    (kmp_int8)__kmp_env_blocktime, //int bt_set; //internal control for whether blocktime is explicitly set
    __kmp_dflt_blocktime, //int blocktime; //internal control for blocktime
#if KMP_USE_MONITOR
    __kmp_bt_intervals, //int bt_intervals; //internal control for blocktime intervals
#endif
    __kmp_dflt_team_nth, //int nproc; //internal control for # of threads for next parallel region (per thread)
                         // (use a max ub on value if __kmp_parallel_initialize not called yet)
    __kmp_dflt_max_active_levels, //int max_active_levels; //internal control for max_active_levels
@@ -6376,8 +6378,10 @@ __kmp_do_serial_initialize( void )

    // Three vars below moved here from __kmp_env_initialize() "KMP_BLOCKTIME" part
    __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
#if KMP_USE_MONITOR
    __kmp_monitor_wakeups = KMP_WAKEUPS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups );
    __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups );
#endif
    // From "KMP_LIBRARY" part of __kmp_env_initialize()
    __kmp_library = library_throughput;
    // From KMP_SCHEDULE initialization
@@ -7449,7 +7453,9 @@ void
__kmp_aux_set_blocktime (int arg, kmp_info_t *thread, int tid)
{
    int blocktime = arg; /* argument is in milliseconds */
#if KMP_USE_MONITOR
    int bt_intervals;
#endif
    int bt_set;

    __kmp_save_internal_controls( thread );
@@ -7463,20 +7469,29 @@ __kmp_aux_set_blocktime (int arg, kmp_info_t *thread, int tid)
    set__blocktime_team( thread->th.th_team, tid, blocktime );
    set__blocktime_team( thread->th.th_serial_team, 0, blocktime );

#if KMP_USE_MONITOR
    /* Calculate and set blocktime intervals for the teams */
    bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);

    set__bt_intervals_team( thread->th.th_team, tid, bt_intervals );
    set__bt_intervals_team( thread->th.th_serial_team, 0, bt_intervals );
#endif

    /* Set whether blocktime has been set to "TRUE" */
    bt_set = TRUE;

    set__bt_set_team( thread->th.th_team, tid, bt_set );
    set__bt_set_team( thread->th.th_serial_team, 0, bt_set );
    KF_TRACE(10, ( "kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, bt_intervals=%d, monitor_updates=%d\n",
                   __kmp_gtid_from_tid(tid, thread->th.th_team),
                   thread->th.th_team->t.t_id, tid, blocktime, bt_intervals, __kmp_monitor_wakeups ) );
    KF_TRACE(10, ( "kmp_set_blocktime: T#%d(%d:%d), blocktime=%d"
#if KMP_USE_MONITOR
                   ", bt_intervals=%d, monitor_updates=%d"
#endif
                   "\n",
                   __kmp_gtid_from_tid(tid, thread->th.th_team), thread->th.th_team->t.t_id, tid, blocktime
#if KMP_USE_MONITOR
                   , bt_intervals, __kmp_monitor_wakeups
#endif
                   ) );
}

void

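The replacement KF_TRACE call in the hunk above relies on C's adjacent string-literal concatenation: the monitor-only part of the format string and the matching arguments sit between the same #if KMP_USE_MONITOR/#endif pair, so both drop out together in a monitor-free build. Below is a minimal sketch of the same trick using plain printf; the variable values are placeholders.

#include <stdio.h>

#define KMP_USE_MONITOR 0   /* assume a build without the monitor thread */

int main(void) {
    int blocktime = 200;
#if KMP_USE_MONITOR
    int bt_intervals = 2, monitor_wakeups = 10;
#endif
    /* Adjacent string literals are concatenated by the compiler, so the
       format string and the argument list shrink together when the
       monitor-only pieces are compiled out. */
    printf("blocktime=%d"
#if KMP_USE_MONITOR
           ", bt_intervals=%d, monitor_updates=%d"
#endif
           "\n",
           blocktime
#if KMP_USE_MONITOR
           , bt_intervals, monitor_wakeups
#endif
           );
    return 0;
}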
@@ -659,9 +659,11 @@ __kmp_stg_parse_blocktime( char const * name, char const * value, void * data )
        }; // if
        __kmp_env_blocktime = TRUE; // KMP_BLOCKTIME was specified.
    }; // if
    // calculate number of monitor thread wakeup intervals corresonding to blocktime.
#if KMP_USE_MONITOR
    // calculate number of monitor thread wakeup intervals corresponding to blocktime.
    __kmp_monitor_wakeups = KMP_WAKEUPS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups );
    __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME( __kmp_dflt_blocktime, __kmp_monitor_wakeups );
#endif
    K_DIAG( 1, ( "__kmp_env_blocktime == %d\n", __kmp_env_blocktime ) );
    if ( __kmp_env_blocktime ) {
        K_DIAG( 1, ( "__kmp_dflt_blocktime == %d\n", __kmp_dflt_blocktime ) );