Fix minor formatting issues
Some code was restructured to move it under KMP_DEBUG. The rest is formatting
changes to fix some things broken by clang-format.

Patch by Terry Wilmarth

Differential Revision: https://reviews.llvm.org/D33744

llvm-svn: 304438
parent 9af30e3541
commit 642688b632
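
The "move it under KMP_DEBUG" part of this change follows the pattern sketched below. This is a simplified, self-contained illustration, not the literal code from the patch: KMP_DEBUG and the KC_TRACE-style macro are stand-ins defined locally (the real definitions live in the runtime headers), and the function is a condensed stand-in for the tail of __kmp_reserve_threads. See the __kmp_reserve_threads hunk further down for the actual change.

// Sketch: trace-only code is compiled only when KMP_DEBUG is defined,
// mirroring how the patch moves the KC_TRACE calls under an #ifdef KMP_DEBUG
// block and keeps a single return.
#include <cstdio>

#ifdef KMP_DEBUG
#define KC_TRACE_SKETCH(fmt, ...) std::printf(fmt, __VA_ARGS__)
#else
#define KC_TRACE_SKETCH(fmt, ...) ((void)0)
#endif

static int reserve_threads_sketch(int new_nthreads, int set_nthreads) {
#ifdef KMP_DEBUG
  if (new_nthreads == 1) {
    KC_TRACE_SKETCH("serializing team; requested %d threads\n", set_nthreads);
  } else {
    KC_TRACE_SKETCH("allocating %d threads; requested %d threads\n",
                    new_nthreads, set_nthreads);
  }
#endif // KMP_DEBUG
  // A single return replaces an early "return 1" path; when new_nthreads == 1
  // the returned value is still 1.
  return new_nthreads;
}

int main() { return reserve_threads_sketch(4, 4) == 4 ? 0 : 1; }
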
@@ -2092,7 +2092,7 @@ restart_radix_check:
       if (threadInfo[i][threadIdIndex] == UINT_MAX) {
         threadInfo[i][threadIdIndex] = threadIdCt++;
       }
-      // Aparrently the thread id field was specified for some entries and not
+      // Apparently the thread id field was specified for some entries and not
       // others. Start the thread id counter off at the next higher thread id.
       else if (threadIdCt <= threadInfo[i][threadIdIndex]) {
         threadIdCt = threadInfo[i][threadIdIndex] + 1;

@@ -1828,9 +1828,8 @@ void __kmp_fork_barrier(int gtid, int tid) {
 #endif
 
   if (__kmp_tasking_mode != tskm_immediate_exec) {
-    __kmp_task_team_setup(
-        this_thr, team,
-        0); // 0 indicates setup current task team if nthreads > 1
+    // 0 indicates setup current task team if nthreads > 1
+    __kmp_task_team_setup(this_thr, team, 0);
   }
 
   /* The master thread may have changed its blocktime between the join barrier

@@ -101,7 +101,6 @@ static kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
 /* Calculate the identifier of the current thread */
 /* fast (and somewhat portable) way to get unique identifier of executing
    thread. Returns KMP_GTID_DNE if we haven't been assigned a gtid. */
-
 int __kmp_get_global_thread_id() {
   int i;
   kmp_info_t **other_threads;

@@ -912,7 +911,6 @@ static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
   }
 
   // Check if the threads array is large enough, or needs expanding.
-  //
   // See comment in __kmp_register_root() about the adjustment if
   // __kmp_threads[0] == NULL.
   capacity = __kmp_threads_capacity;

@@ -949,17 +947,18 @@ static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
     }
   }
 
+#ifdef KMP_DEBUG
   if (new_nthreads == 1) {
     KC_TRACE(10,
              ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
               "dead roots and rechecking; requested %d threads\n",
               __kmp_get_gtid(), set_nthreads));
-    return 1;
+  } else {
+    KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested"
+                  " %d threads\n",
+                  __kmp_get_gtid(), new_nthreads, set_nthreads));
   }
-
-  KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested "
-                "%d threads\n",
-                __kmp_get_gtid(), new_nthreads, set_nthreads));
+#endif // KMP_DEBUG
   return new_nthreads;
 }
 

@@ -1424,9 +1423,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
   // used to launch non-serial teams even if nested is not allowed
   active_level = parent_team->t.t_active_level;
 #if OMP_40_ENABLED
-  teams_level =
-      master_th->th
-          .th_teams_level; // needed to check nesting inside the teams
+  // needed to check nesting inside the teams
+  teams_level = master_th->th.th_teams_level;
 #endif
 #if KMP_NESTED_HOT_TEAMS
   p_hot_teams = &master_th->th.th_hot_teams;

@@ -1434,8 +1432,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
     *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
         sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
     (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
-    (*p_hot_teams)[0].hot_team_nth =
-        1; // it is either actual or not needed (when active_level > 0)
+    // it is either actual or not needed (when active_level > 0)
+    (*p_hot_teams)[0].hot_team_nth = 1;
   }
 #endif
 

@@ -1572,8 +1570,7 @@ int __kmp_fork_call(ident_t *loc, int gtid,
 #if USE_DEBUGGER
     if (__kmp_debugging) { // Let debugger override number of threads.
       int nth = __kmp_omp_num_threads(loc);
-      if (nth >
-          0) { // 0 means debugger does not want to change number of threads.
+      if (nth > 0) { // 0 means debugger doesn't want to change num threads
         master_set_numthreads = nth;
       }; // if
     }; // if

@@ -1640,17 +1637,15 @@ int __kmp_fork_call(ident_t *loc, int gtid,
 #endif /* OMP_40_ENABLED */
                                        )) ||
         (__kmp_library == library_serial)) {
-      KC_TRACE(
-          10,
-          ("__kmp_fork_call: T#%d serializing team; requested %d threads\n",
-           gtid, nthreads));
+      KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
+                    " threads\n",
+                    gtid, nthreads));
       nthreads = 1;
     }
   }
   if (nthreads > 1) {
     /* determine how many new threads we can use */
     __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
-
     nthreads = __kmp_reserve_threads(
         root, parent_team, master_tid, nthreads
 #if OMP_40_ENABLED

@@ -2088,9 +2083,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
           master_th->th.th_task_state;
       master_th->th.th_task_state_top++;
 #if KMP_NESTED_HOT_TEAMS
-      if (team ==
-          master_th->th.th_hot_teams[active_level]
-              .hot_team) { // Restore master's nested state if nested hot team
+      if (team == master_th->th.th_hot_teams[active_level].hot_team) {
+        // Restore master's nested state if nested hot team
         master_th->th.th_task_state =
             master_th->th
                 .th_task_state_memo_stack[master_th->th.th_task_state_top];

@@ -2174,8 +2168,8 @@ int __kmp_fork_call(ident_t *loc, int gtid,
             master_th->th.th_frame_time = tmp_time;
           if (__kmp_forkjoin_frames_mode == 3)
             team->t.t_region_time = tmp_time;
-        } else // only one notification scheme (either "submit" or
-               // "forking/joined", not both)
+        } else
+// only one notification scheme (either "submit" or "forking/joined", not both)
 #endif /* USE_ITT_NOTIFY */
         if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
             __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {

@@ -2201,8 +2195,9 @@ int __kmp_fork_call(ident_t *loc, int gtid,
 #endif /* USE_ITT_BUILD */
 
 #if OMP_40_ENABLED
-  if (ap) // AC: skip __kmp_internal_fork at teams construct, let only master
-          // threads execute
+  // AC: skip __kmp_internal_fork at teams construct, let only master
+  // threads execute
+  if (ap)
 #endif /* OMP_40_ENABLED */
   {
     __kmp_internal_fork(loc, gtid, team);

@@ -4379,7 +4374,6 @@ static void __kmp_reinitialize_team(kmp_team_t *team,
   KMP_CHECK_UPDATE(team->t.t_ident, loc);
 
   KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
-
   // Copy ICVs to the master thread's implicit taskdata
   __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
   copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);

@@ -4797,8 +4791,7 @@ __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
     if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
       KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
       // This case can mean that omp_set_num_threads() was called and the hot
-      // team size
-      // was already reduced, so we check the special flag
+      // team size was already reduced, so we check the special flag
       if (team->t.t_size_changed == -1) {
         team->t.t_size_changed = 1;
       } else {

@@ -869,8 +869,8 @@ void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
 
   if (set_curr_task) { // only do this init first time thread is created
     task->td_incomplete_child_tasks = 0;
-    task->td_allocated_child_tasks = 0; // Not used: don't need to
-                                        // deallocate implicit task
+    // Not used: don't need to deallocate implicit task
+    task->td_allocated_child_tasks = 0;
 #if OMP_40_ENABLED
     task->td_taskgroup = NULL; // An implicit task does not have taskgroup
     task->td_dephash = NULL;