Renaming change: 41 -> 45 and 4.1 -> 4.5

OpenMP 4.1 is now OpenMP 4.5.  Every mention of 41 or 4.1 is replaced with
45 or 4.5.  If the CMake option LIBOMP_OMP_VERSION is set to 41, CMake
warns that 41 is deprecated and substitutes 45.

llvm-svn: 272687
Jonathan Peyton 2016-06-14 17:57:47 +00:00
parent 2a86b555e1
commit df6818bea4
25 changed files with 93 additions and 89 deletions

View File

@ -116,8 +116,8 @@ Library type can be normal, profile, or stubs.
-DCMAKE_BUILD_TYPE=Release|Debug|RelWithDebInfo
Build type can be Release, Debug, or RelWithDebInfo.
--DLIBOMP_OMP_VERSION=41|40|30
-OpenMP version can be either 41, 40 or 30.
+-DLIBOMP_OMP_VERSION=45|40|30
+OpenMP version can be either 45, 40 or 30.
-DLIBOMP_MIC_ARCH=knc|knf
This value is ignored if LIBOMP_ARCH != mic

View File

@ -86,9 +86,13 @@ libomp_check_variable(LIBOMP_ARCH 32e x86_64 32 i386 arm ppc64 ppc64le aarch64 m
set(LIBOMP_LIB_TYPE normal CACHE STRING
"Performance,Profiling,Stubs library (normal/profile/stubs)")
libomp_check_variable(LIBOMP_LIB_TYPE normal profile stubs)
-set(LIBOMP_OMP_VERSION 41 CACHE STRING
-"The OpenMP version (41/40/30)")
-libomp_check_variable(LIBOMP_OMP_VERSION 41 40 30)
+set(LIBOMP_OMP_VERSION 45 CACHE STRING
+"The OpenMP version (45/40/30)")
+if(LIBOMP_OMP_VERSION EQUAL 41)
+libomp_warning_say("LIBOMP_OMP_VERSION=41 is deprecated and will be removed in a later version. Please use 45.")
+set(LIBOMP_OMP_VERSION 45)
+endif()
+libomp_check_variable(LIBOMP_OMP_VERSION 45 40 30)
# Set the OpenMP Year and Month associated with version
if(${LIBOMP_OMP_VERSION} GREATER 40 OR ${LIBOMP_OMP_VERSION} EQUAL 40)
set(LIBOMP_OMP_YEAR_MONTH 201307)
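For reference, the yyyymm values track the spec release dates (4.0 is 201307; 4.5 is 201511), the same scheme compilers report through the _OPENMP macro. A minimal probe, assuming any OpenMP-enabled compiler:

#include <stdio.h>

int main(void) {
#ifdef _OPENMP
    /* Prints e.g. 201511 when the compiler implements OpenMP 4.5. */
    printf("_OPENMP = %d\n", _OPENMP);
#else
    printf("compiled without OpenMP\n");
#endif
    return 0;
}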

View File

@ -21,7 +21,7 @@ set(libomp_suffix)
libomp_append(libomp_suffix .deb DEBUG_BUILD)
libomp_append(libomp_suffix .dia RELWITHDEBINFO_BUILD)
libomp_append(libomp_suffix .min MINSIZEREL_BUILD)
if(NOT "${LIBOMP_OMP_VERSION}" STREQUAL "41")
if(NOT "${LIBOMP_OMP_VERSION}" STREQUAL "45")
libomp_append(libomp_suffix .${LIBOMP_OMP_VERSION})
endif()
libomp_append(libomp_suffix .s1 LIBOMP_STATS)

View File

@ -191,8 +191,8 @@ function(libomp_get_gdflags gdflags)
libomp_append(gdflags_local "-D stub" STUBS_LIBRARY)
libomp_append(gdflags_local "-D HAVE_QUAD" LIBOMP_USE_QUAD_PRECISION)
libomp_append(gdflags_local "-D USE_DEBUGGER" LIBOMP_USE_DEBUGGER)
-if(${LIBOMP_OMP_VERSION} GREATER 41 OR ${LIBOMP_OMP_VERSION} EQUAL 41)
-libomp_append(gdflags_local "-D OMP_41")
+if(${LIBOMP_OMP_VERSION} GREATER 45 OR ${LIBOMP_OMP_VERSION} EQUAL 45)
+libomp_append(gdflags_local "-D OMP_45")
endif()
if(${LIBOMP_OMP_VERSION} GREATER 40 OR ${LIBOMP_OMP_VERSION} EQUAL 40)
libomp_append(gdflags_local "-D OMP_40")

View File

@ -384,9 +384,9 @@ kmpc_set_defaults 224
%endif # OMP_40
%endif
-# OpenMP 4.1 entry points
+# OpenMP 4.5 entry points
%ifndef stub
-%ifdef OMP_41
+%ifdef OMP_45
__kmpc_proxy_task_completed 259
__kmpc_proxy_task_completed_ooo 260
__kmpc_doacross_init 261
@ -505,9 +505,9 @@ kmp_set_warnings_off 780
%endif
%endif # OMP_40
-# OpenMP 41
+# OpenMP 45
-%ifdef OMP_41
+%ifdef OMP_45
omp_init_lock_with_hint 870
omp_init_nest_lock_with_hint 871
omp_get_max_task_priority 872
@ -527,7 +527,7 @@ kmp_set_warnings_off 780
omp_target_associate_ptr 888
omp_target_disassociate_ptr 889
%endif
-%endif # OMP_41
+%endif # OMP_45
kmp_set_disp_num_buffers 890
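The new 4.5 ordinals above (870-890) cover the user-visible additions such as the hinted lock initializers and omp_get_max_task_priority. A minimal sketch of reaching them from user code, assuming a 4.5-capable compiler linked against this runtime:

#include <omp.h>
#include <stdio.h>

int main(void) {
    omp_lock_t l;
    /* OpenMP 4.5: request a lock tuned for speculative execution; the
       runtime may fall back to a plain lock. */
    omp_init_lock_with_hint(&l, omp_lock_hint_speculative);
    omp_set_lock(&l);
    printf("max task priority: %d\n", omp_get_max_task_priority());
    omp_unset_lock(&l);
    omp_destroy_lock(&l);
    return 0;
}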

View File

@ -1,5 +1,5 @@
/*
-* include/41/omp.h.var
+* include/45/omp.h.var
*/

View File

@ -1,4 +1,4 @@
-! include/41/omp_lib.f.var
+! include/45/omp_lib.f.var
!
!//===----------------------------------------------------------------------===//

View File

@ -1,4 +1,4 @@
-! include/41/omp_lib.f90.var
+! include/45/omp_lib.f90.var
!
!//===----------------------------------------------------------------------===//

View File

@ -1,4 +1,4 @@
-! include/41/omp_lib.h.var
+! include/45/omp_lib.h.var
!
!//===----------------------------------------------------------------------===//

View File

@ -1,5 +1,5 @@
/*
-* include/41/ompt.h.var
+* include/45/ompt.h.var
*/
#ifndef __OMPT__

View File

@ -313,7 +313,7 @@ enum sched_type {
kmp_sch_static_steal = 44, /**< accessible only through KMP_SCHEDULE environment variable */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_sch_static_balanced_chunked = 45, /**< static with chunk adjustment (e.g., simd) */
#endif
@ -369,7 +369,7 @@ enum sched_type {
kmp_nm_ord_trapezoidal = 199,
kmp_nm_upper = 200, /**< upper bound for nomerge values */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
/* Support for OpenMP 4.5 monotonic and nonmonotonic schedule modifiers.
* Since we need to distinguish the three possible cases (no modifier, monotonic modifier,
* nonmonotonic modifier), we need separate bits for each modifier.
@ -393,7 +393,7 @@ enum sched_type {
# define SCHEDULE_HAS_NONMONOTONIC(s) (((s) & kmp_sch_modifier_nonmonotonic) != 0)
# define SCHEDULE_HAS_NO_MODIFIERS(s) (((s) & (kmp_sch_modifier_nonmonotonic | kmp_sch_modifier_monotonic)) == 0)
#else
-/* By doing this we hope to avoid multiple tests on OMP_41_ENABLED. Compilers can now eliminate tests on compile time
+/* By doing this we hope to avoid multiple tests on OMP_45_ENABLED. Compilers can now eliminate tests on compile time
* constants and dead code that results from them, so we can leave code guarded by such an if in place.
*/
# define SCHEDULE_WITHOUT_MODIFIERS(s) (s)
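To make the modifier handling concrete, here is a self-contained sketch of the bit-twiddling idea; the bit positions are illustrative, and the authoritative values are the kmp_sch_modifier_* enumerators in kmp.h:

#include <stdio.h>

/* Illustrative bit positions only. High bits keep the 4.5 schedule
   modifiers disjoint from the base sched_type enumeration. */
enum {
    sch_modifier_monotonic    = 1u << 29,
    sch_modifier_nonmonotonic = 1u << 30,
};
#define SCHED_BASE(s)          ((s) & ~(sch_modifier_monotonic | sch_modifier_nonmonotonic))
#define SCHED_HAS_MONOTONIC(s) (((s) & sch_modifier_monotonic) != 0)

int main(void) {
    unsigned s = 35u | sch_modifier_monotonic;  /* 35: some base schedule kind */
    printf("base=%u monotonic=%d\n", SCHED_BASE(s), SCHED_HAS_MONOTONIC(s));
    return 0;
}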
@ -1697,7 +1697,7 @@ typedef struct dispatch_shared_info {
dispatch_shared_info64_t s64;
} u;
volatile kmp_uint32 buffer_index;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
volatile kmp_int32 doacross_buf_idx; // teamwise index
volatile kmp_uint32 *doacross_flags; // shared array of iteration flags (0/1)
kmp_int32 doacross_num_done; // count finished threads
@ -1715,7 +1715,7 @@ typedef struct kmp_disp {
dispatch_private_info_t *th_disp_buffer;
kmp_int32 th_disp_index;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_int32 th_doacross_buf_idx; // thread's doacross buffer index
volatile kmp_uint32 *th_doacross_flags; // pointer to shared array of flags
kmp_int64 *th_doacross_info; // info on loop bounds
@ -2036,7 +2036,7 @@ typedef enum kmp_tasking_mode {
extern kmp_tasking_mode_t __kmp_tasking_mode; /* determines how/when to execute tasks */
extern kmp_int32 __kmp_task_stealing_constraint;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
extern kmp_int32 __kmp_max_task_priority; // Set via OMP_MAX_TASK_PRIORITY if specified, defaults to 0 otherwise
#endif
@ -2057,11 +2057,11 @@ extern kmp_int32 __kmp_task_stealing_constraint;
*/
typedef kmp_int32 (* kmp_routine_entry_t)( kmp_int32, void * );
-#if OMP_40_ENABLED || OMP_41_ENABLED
+#if OMP_40_ENABLED || OMP_45_ENABLED
typedef union kmp_cmplrdata {
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_int32 priority; /**< priority specified by user for the task */
-#endif // OMP_41_ENABLED
+#endif // OMP_45_ENABLED
#if OMP_40_ENABLED
kmp_routine_entry_t destructors; /* pointer to function to invoke deconstructors of firstprivate C++ objects */
#endif // OMP_40_ENABLED
@ -2076,7 +2076,7 @@ typedef struct kmp_task { /* GEH: Shouldn't this be aligned so
void * shareds; /**< pointer to block of pointers to shared vars */
kmp_routine_entry_t routine; /**< pointer to routine to call for executing task */
kmp_int32 part_id; /**< part id for the task */
-#if OMP_40_ENABLED || OMP_41_ENABLED
+#if OMP_40_ENABLED || OMP_45_ENABLED
kmp_cmplrdata_t data1; /* Two known optional additions: destructors and priority */
kmp_cmplrdata_t data2; /* Process destructors first, priority second */
/* future data */
@ -2177,7 +2177,7 @@ typedef struct kmp_tasking_flags { /* Total struct must be exactly 32 b
unsigned merged_if0 : 1; /* no __kmpc_task_{begin/complete}_if0 calls in if0 code path */
#if OMP_40_ENABLED
unsigned destructors_thunk : 1; /* set if the compiler creates a thunk to invoke destructors from the runtime */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
unsigned proxy : 1; /* task is a proxy task (it will be executed outside the context of the RTL) */
unsigned priority_specified :1; /* set if the compiler provides priority setting for the task */
unsigned reserved : 10; /* reserved for compiler use */
@ -2230,7 +2230,7 @@ struct kmp_taskdata { /* aligned during dynamic
#if OMPT_SUPPORT
ompt_task_info_t ompt_task_info;
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_task_team_t * td_task_team;
kmp_int32 td_size_alloc; // The size of task structure, including shareds etc.
#endif
@ -2280,7 +2280,7 @@ typedef struct kmp_base_task_team {
/* TRUE means tt_threads_data is set up and initialized */
kmp_int32 tt_nproc; /* #threads in team */
kmp_int32 tt_max_threads; /* number of entries allocated for threads_data array */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_int32 tt_found_proxy_tasks; /* Have we found proxy tasks since last barrier */
#endif
@ -3422,7 +3422,7 @@ KMP_EXPORT void __kmpc_end_ordered ( ident_t *, kmp_int32 global_tid );
KMP_EXPORT void __kmpc_critical ( ident_t *, kmp_int32 global_tid, kmp_critical_name * );
KMP_EXPORT void __kmpc_end_critical ( ident_t *, kmp_int32 global_tid, kmp_critical_name * );
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_critical_with_hint ( ident_t *, kmp_int32 global_tid, kmp_critical_name *, uintptr_t hint );
#endif
@ -3509,7 +3509,7 @@ KMP_EXPORT kmp_int32 __kmpc_cancellationpoint(ident_t* loc_ref, kmp_int32 gtid,
KMP_EXPORT kmp_int32 __kmpc_cancel_barrier(ident_t* loc_ref, kmp_int32 gtid);
KMP_EXPORT int __kmp_get_cancellation_status(int cancel_kind);
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_proxy_task_completed( kmp_int32 gtid, kmp_task_t *ptask );
KMP_EXPORT void __kmpc_proxy_task_completed_ooo ( kmp_task_t *ptask );
@ -3535,7 +3535,7 @@ KMP_EXPORT void __kmpc_unset_nest_lock( ident_t *loc, kmp_int32 gtid, void **use
KMP_EXPORT int __kmpc_test_lock( ident_t *loc, kmp_int32 gtid, void **user_lock );
KMP_EXPORT int __kmpc_test_nest_lock( ident_t *loc, kmp_int32 gtid, void **user_lock );
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
KMP_EXPORT void __kmpc_init_lock_with_hint( ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint );
KMP_EXPORT void __kmpc_init_nest_lock_with_hint( ident_t *loc, kmp_int32 gtid, void **user_lock, uintptr_t hint );
#endif
@ -3591,7 +3591,7 @@ KMP_EXPORT void __kmpc_push_proc_bind( ident_t *loc, kmp_int32 global_tid, int p
KMP_EXPORT void __kmpc_push_num_teams( ident_t *loc, kmp_int32 global_tid, kmp_int32 num_teams, kmp_int32 num_threads );
KMP_EXPORT void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...);
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
struct kmp_dim { // loop bounds info casted to kmp_int64
kmp_int64 lo; // lower
kmp_int64 up; // upper
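kmp_dim carries per-dimension loop bounds for the doacross entry points exported earlier in this commit (__kmpc_doacross_init and friends). A hedged user-level sketch of what a 4.5 compiler lowers onto them:

#include <stdio.h>
#define N 16

int main(void) {
    int a[N];
    a[0] = 1;
    /* OpenMP 4.5 doacross: ordered(1) plus depend(sink/source) expresses a
       cross-iteration dependence that the runtime enforces per iteration. */
    #pragma omp parallel for ordered(1)
    for (int i = 1; i < N; ++i) {
        #pragma omp ordered depend(sink: i - 1)
        a[i] = a[i - 1] + 1;
        #pragma omp ordered depend(source)
    }
    printf("a[N-1] = %d\n", a[N - 1]);
    return 0;
}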

View File

@ -1260,7 +1260,7 @@ __kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
} else { // Team is serialized.
status = 0;
if (__kmp_tasking_mode != tskm_immediate_exec) {
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( this_thr->th.th_task_team != NULL ) {
void *itt_sync_obj = NULL;
#if USE_ITT_NOTIFY

View File

@ -65,7 +65,7 @@
#define KMP_VERSION_MINOR @LIBOMP_VERSION_MINOR@
#define LIBOMP_OMP_VERSION @LIBOMP_OMP_VERSION@
#define OMP_50_ENABLED (LIBOMP_OMP_VERSION >= 50)
-#define OMP_41_ENABLED (LIBOMP_OMP_VERSION >= 41)
+#define OMP_45_ENABLED (LIBOMP_OMP_VERSION >= 45)
#define OMP_40_ENABLED (LIBOMP_OMP_VERSION >= 40)
#define OMP_30_ENABLED (LIBOMP_OMP_VERSION >= 30)
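Runtime sources gate on these derived macros rather than on LIBOMP_OMP_VERSION directly. A hypothetical consumer, assuming the generated kmp_config.h is on the include path:

#include <stdio.h>
#include "kmp_config.h"  /* generated from kmp_config.h.cmake */

int main(void) {
#if OMP_45_ENABLED
    printf("built with OpenMP 4.5 features\n");
#endif
    printf("LIBOMP_OMP_VERSION = %d\n", LIBOMP_OMP_VERSION);
    return 0;
}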

View File

@ -502,7 +502,7 @@ __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid)
this_thr = __kmp_threads[ global_tid ];
serial_team = this_thr->th.th_serial_team;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_task_team_t * task_team = this_thr->th.th_task_team;
// we need to wait for the proxy tasks before finishing the thread
@ -3036,7 +3036,7 @@ void __kmpc_place_threads(int nS, int sO, int nC, int cO, int nT)
__kmp_place_num_threads_per_core = nT;
}
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
/*!
@ingroup WORK_SHARING
@param loc source location information.

View File

@ -175,7 +175,7 @@ struct dispatch_shared_info_template {
dispatch_shared_info64_t s64;
} u;
volatile kmp_uint32 buffer_index;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
volatile kmp_int32 doacross_buf_idx; // teamwise index
kmp_uint32 *doacross_flags; // array of iteration flags (0/1)
kmp_int32 doacross_num_done; // count finished threads

View File

@ -689,7 +689,7 @@ xexpand(FTN_GET_PROC_BIND)( void )
#endif
}
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
int FTN_STDCALL
FTN_GET_NUM_PLACES( void )
{
@ -953,7 +953,7 @@ xexpand(FTN_IS_INITIAL_DEVICE)( void )
#endif // OMP_40_ENABLED
-#if OMP_41_ENABLED && defined(KMP_STUB)
+#if OMP_45_ENABLED && defined(KMP_STUB)
// OpenMP 4.5 entries for stubs library
int FTN_STDCALL
@ -1008,7 +1008,7 @@ FTN_TARGET_DISASSOCIATE_PTR(void *host_ptr, int device_num)
{
return -1;
}
-#endif // OMP_41_ENABLED && defined(KMP_STUB)
+#endif // OMP_45_ENABLED && defined(KMP_STUB)
#ifdef KMP_STUB
typedef enum { UNINIT = -1, UNLOCKED, LOCKED } kmp_stub_lock_t;
@ -1300,7 +1300,7 @@ FTN_GET_CANCELLATION_STATUS(int cancel_kind) {
#endif // OMP_40_ENABLED
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
/* returns the maximum allowed task priority */
int FTN_STDCALL
FTN_GET_MAX_TASK_PRIORITY( void )
@ -1394,8 +1394,8 @@ xaliasify(FTN_GET_CANCELLATION, 40);
xaliasify(FTN_IS_INITIAL_DEVICE, 40);
#endif /* OMP_40_ENABLED */
-#if OMP_41_ENABLED
-// OMP_4.1 aliases
+#if OMP_45_ENABLED
+// OMP_4.5 aliases
#endif
#if OMP_50_ENABLED
@ -1463,8 +1463,8 @@ xversionify(FTN_GET_CANCELLATION, 40, "OMP_4.0");
xversionify(FTN_IS_INITIAL_DEVICE, 40, "OMP_4.0");
#endif /* OMP_40_ENABLED */
-#if OMP_41_ENABLED
-// OMP_4.1 versioned symbols
+#if OMP_45_ENABLED
+// OMP_4.5 versioned symbols
#endif
#if OMP_50_ENABLED

View File

@ -115,7 +115,7 @@
#define FTN_GET_CANCELLATION_STATUS kmp_get_cancellation_status
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
#define FTN_GET_MAX_TASK_PRIORITY omp_get_max_task_priority
#define FTN_GET_NUM_PLACES omp_get_num_places
#define FTN_GET_PLACE_NUM_PROCS omp_get_place_num_procs
@ -234,7 +234,7 @@
#define FTN_GET_CANCELLATION_STATUS kmp_get_cancellation_status_
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
#define FTN_GET_MAX_TASK_PRIORITY omp_get_max_task_priority_
#define FTN_GET_NUM_PLACES omp_get_num_places_
#define FTN_GET_PLACE_NUM_PROCS omp_get_place_num_procs_
@ -353,7 +353,7 @@
#define FTN_GET_CANCELLATION_STATUS KMP_GET_CANCELLATION_STATUS
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
#define FTN_GET_MAX_TASK_PRIORITY OMP_GET_MAX_TASK_PRIORITY
#define FTN_GET_NUM_PLACES OMP_GET_NUM_PLACES
#define FTN_GET_PLACE_NUM_PROCS OMP_GET_PLACE_NUM_PROCS
@ -472,7 +472,7 @@
#define FTN_GET_CANCELLATION_STATUS KMP_GET_CANCELLATION_STATUS_
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
#define FTN_GET_MAX_TASK_PRIORITY OMP_GET_MAX_TASK_PRIORITY_
#define FTN_GET_NUM_PLACES OMP_GET_NUM_PLACES_
#define FTN_GET_PLACE_NUM_PROCS OMP_GET_PLACE_NUM_PROCS_

View File

@ -262,7 +262,7 @@ int __kmp_place_core_offset = 0;
int __kmp_place_num_threads_per_core = 0;
kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_int32 __kmp_max_task_priority = 0;
#endif

View File

@ -1071,7 +1071,7 @@ extern void __kmp_cleanup_user_locks();
// KMP_USE_DYNAMIC_LOCK enables dynamic dispatch of lock functions without breaking the current
// compatibility. Essential functionality of this new code is dynamic dispatch, but it also
// implements (or enables implementation of) hinted user lock and critical section which will be
-// part of OMP 4.1 soon.
+// part of OMP 4.5 soon.
//
// Lock type can be decided at creation time (i.e., lock initialization), and subsequent lock
// function call on the created lock object requires type extraction and call through jump table

View File

@ -679,7 +679,7 @@ typedef void (*microtask_t)( int *gtid, int *npr, ... );
#endif
// Enable dynamic user lock
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
# define KMP_USE_DYNAMIC_LOCK 1
#endif

View File

@ -2978,7 +2978,7 @@ __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth)
/* setup dispatch buffers */
for(i = 0 ; i < num_disp_buff; ++i) {
team->t.t_disp_buffer[i].buffer_index = i;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
team->t.t_disp_buffer[i].doacross_buf_idx = i;
#endif
}
@ -3899,7 +3899,7 @@ __kmp_unregister_root_current_thread( int gtid )
KMP_MB();
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_info_t * thread = __kmp_threads[gtid];
kmp_team_t * team = thread->th.th_team;
kmp_task_team_t * task_team = thread->th.th_task_team;
@ -4047,7 +4047,7 @@ __kmp_initialize_info( kmp_info_t *this_thr, kmp_team_t *team, int tid, int gtid
KMP_DEBUG_ASSERT( dispatch == &team->t.t_dispatch[ tid ] );
dispatch->th_disp_index = 0;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
dispatch->th_doacross_buf_idx = 0;
#endif
if( ! dispatch->th_disp_buffer ) {
@ -6757,7 +6757,7 @@ __kmp_run_before_invoked_task( int gtid, int tid, kmp_info_t *this_thr,
//KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[ this_thr->th.th_info.ds.ds_tid ] );
dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
dispatch->th_doacross_buf_idx = 0; /* reset the doacross dispatch buffer counter */
#endif
if( __kmp_env_consistency_check )
@ -6999,13 +6999,13 @@ __kmp_internal_fork( ident_t *id, int gtid, kmp_team_t *team )
int i;
for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
team->t.t_disp_buffer[ i ].buffer_index = i;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
team->t.t_disp_buffer[i].doacross_buf_idx = i;
#endif
}
} else {
team->t.t_disp_buffer[ 0 ].buffer_index = 0;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
team->t.t_disp_buffer[0].doacross_buf_idx = 0;
#endif
}

View File

@ -324,7 +324,7 @@ __kmp_for_static_init(
*plastiter = (tid == ((trip_count - 1)/( UT )chunk) % nth);
break;
}
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
case kmp_sch_static_balanced_chunked:
{
register T old_upper = *pupper;
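kmp_sch_static_balanced_chunked backs the 4.5 simd schedule modifier; a hedged sketch of the user-level loop that selects it, assuming the compiler maps schedule(simd:static) onto this schedule kind:

#include <stdio.h>
#define N 1024

int main(void) {
    float a[N];
    /* schedule(simd: static, 64): chunk sizes may be rounded up to a
       multiple of the simd width, which the balanced-chunked schedule
       implements. */
    #pragma omp parallel for simd schedule(simd: static, 64)
    for (int i = 0; i < N; ++i)
        a[i] = 0.5f * i;
    printf("%f\n", a[N - 1]);
    return 0;
}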

View File

@ -1182,7 +1182,7 @@ __kmp_stg_print_max_active_levels( kmp_str_buf_t * buffer, char const * name, vo
__kmp_stg_print_int( buffer, name, __kmp_dflt_max_active_levels );
} // __kmp_stg_print_max_active_levels
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// -------------------------------------------------------------------------------------------------
// OpenMP 4.5: OMP_MAX_TASK_PRIORITY
// -------------------------------------------------------------------------------------------------
@ -1195,7 +1195,7 @@ static void
__kmp_stg_print_max_task_priority(kmp_str_buf_t *buffer, char const *name, void *data) {
__kmp_stg_print_int(buffer, name, __kmp_max_task_priority);
} // __kmp_stg_print_max_task_priority
-#endif // OMP_41_ENABLED
+#endif // OMP_45_ENABLED
// -------------------------------------------------------------------------------------------------
// KMP_DISP_NUM_BUFFERS
@ -4658,7 +4658,7 @@ static kmp_setting_t __kmp_stg_table[] = {
{ "KMP_TASKING", __kmp_stg_parse_tasking, __kmp_stg_print_tasking, NULL, 0, 0 },
{ "KMP_TASK_STEALING_CONSTRAINT", __kmp_stg_parse_task_stealing, __kmp_stg_print_task_stealing, NULL, 0, 0 },
{ "OMP_MAX_ACTIVE_LEVELS", __kmp_stg_parse_max_active_levels, __kmp_stg_print_max_active_levels, NULL, 0, 0 },
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
{ "OMP_MAX_TASK_PRIORITY", __kmp_stg_parse_max_task_priority, __kmp_stg_print_max_task_priority, NULL, 0, 0 },
#endif
{ "OMP_THREAD_LIMIT", __kmp_stg_parse_all_threads, __kmp_stg_print_all_threads, NULL, 0, 0 },

View File

@ -479,7 +479,7 @@ __kmpc_omp_task_with_deps( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_ta
#endif /* OMPT_SUPPORT && OMPT_TRACE */
bool serial = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
serial = serial && !(new_taskdata->td_flags.proxy == TASK_PROXY);
#endif
@ -505,7 +505,7 @@ __kmpc_omp_task_with_deps( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_ta
return TASK_CURRENT_NOT_QUEUED;
}
} else {
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
kmp_task_team_t * task_team = thread->th.th_task_team;
if ( task_team && task_team->tt.tt_found_proxy_tasks )
__kmpc_omp_wait_deps ( loc_ref, gtid, ndeps, dep_list, ndeps_noalias, noalias_dep_list );
@ -551,7 +551,7 @@ __kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_de
// - dependences are not computed in serial teams (except if we have proxy tasks)
// - if the dephash is not yet created it means we have nothing to wait for
bool ignore = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
ignore = ignore && thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
ignore = ignore || current_task->td_dephash == NULL;

View File

@ -32,7 +32,7 @@ static void __kmp_enable_tasking( kmp_task_team_t *task_team, kmp_info_t *this_t
static void __kmp_alloc_task_deque( kmp_info_t *thread, kmp_thread_data_t *thread_data );
static int __kmp_realloc_task_threads_data( kmp_info_t *thread, kmp_task_team_t *task_team );
-#ifdef OMP_41_ENABLED
+#ifdef OMP_45_ENABLED
static void __kmp_bottom_half_finish_proxy( kmp_int32 gtid, kmp_task_t * ptask );
#endif
@ -315,7 +315,7 @@ __kmp_push_task(kmp_int32 gtid, kmp_task_t * task )
// Lock the deque for the task push operation
__kmp_acquire_bootstrap_lock( & thread_data -> td.td_deque_lock );
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// Need to recheck as we can get a proxy task from a thread outside of OpenMP
if ( TCR_4(thread_data -> td.td_deque_ntasks) >= TASK_DEQUE_SIZE(thread_data->td) )
{
@ -837,7 +837,7 @@ __kmp_init_implicit_task( ident_t *loc_ref, kmp_info_t *this_thr, kmp_team_t *te
task->td_flags.tiedness = TASK_TIED;
task->td_flags.tasktype = TASK_IMPLICIT;
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
task->td_flags.proxy = TASK_FULL;
#endif
@ -925,7 +925,7 @@ __kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags,
flags->final = 1;
}
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( flags->proxy == TASK_PROXY ) {
flags->tiedness = TASK_UNTIED;
flags->merged_if0 = 1;
@ -1009,7 +1009,7 @@ __kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags,
taskdata->td_taskwait_counter = 0;
taskdata->td_taskwait_thread = 0;
KMP_DEBUG_ASSERT( taskdata->td_parent != NULL );
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// avoid copying icvs for proxy tasks
if ( flags->proxy == TASK_FULL )
#endif
@ -1021,7 +1021,7 @@ __kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags,
#if OMP_40_ENABLED
taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
#endif // OMP_40_ENABLED
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
taskdata->td_flags.proxy = flags->proxy;
taskdata->td_task_team = thread->th.th_task_team;
taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
@ -1056,7 +1056,7 @@ __kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags,
#endif
// Only need to keep track of child task counts if team parallel and tasking not serialized or if it is a proxy task
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( flags->proxy == TASK_PROXY || !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) )
#else
if ( !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) )
@ -1095,7 +1095,7 @@ __kmpc_omp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 flags,
input_flags->native = FALSE;
// __kmp_task_alloc() sets up all other runtime flags
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) "
"sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
@ -1134,7 +1134,7 @@ __kmp_invoke_task( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t * current_ta
KA_TRACE(30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
gtid, taskdata, current_task) );
KMP_DEBUG_ASSERT(task);
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( taskdata->td_flags.proxy == TASK_PROXY &&
taskdata->td_flags.complete == 1)
{
@ -1158,7 +1158,7 @@ __kmp_invoke_task( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t * current_ta
}
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// Proxy tasks are not handled by the runtime
if ( taskdata->td_flags.proxy != TASK_PROXY )
#endif
@ -1255,7 +1255,7 @@ __kmp_invoke_task( kmp_int32 gtid, kmp_task_t *task, kmp_taskdata_t * current_ta
}
#endif
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// Proxy tasks are not handled by the runtime
if ( taskdata->td_flags.proxy != TASK_PROXY )
#endif
@ -1333,7 +1333,7 @@ __kmp_omp_task( kmp_int32 gtid, kmp_task_t * new_task, bool serialize_immediate
/* Should we execute the new task or queue it? For now, let's just always try to
queue it. If the queue fills up, then we'll execute it. */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( new_taskdata->td_flags.proxy == TASK_PROXY || __kmp_push_task( gtid, new_task ) == TASK_NOT_PUSHED ) // if cannot defer
#else
if ( __kmp_push_task( gtid, new_task ) == TASK_NOT_PUSHED ) // if cannot defer
@ -1433,7 +1433,7 @@ __kmpc_omp_taskwait( ident_t *loc_ref, kmp_int32 gtid )
__kmp_itt_taskwait_starting( gtid, itt_sync_obj );
#endif /* USE_ITT_BUILD */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) )
#else
if ( ! taskdata->td_flags.team_serial )
@ -1574,7 +1574,7 @@ __kmpc_end_taskgroup( ident_t* loc, int gtid )
__kmp_itt_taskwait_starting( gtid, itt_sync_obj );
#endif /* USE_ITT_BUILD */
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) )
#else
if ( ! taskdata->td_flags.team_serial )
@ -1827,7 +1827,7 @@ static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gti
nthreads = task_team -> tt.tt_nproc;
unfinished_threads = &(task_team -> tt.tt_unfinished_threads);
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
KMP_DEBUG_ASSERT( nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
#else
KMP_DEBUG_ASSERT( nthreads > 1 );
@ -1936,7 +1936,7 @@ static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gti
}
// The task source has been exhausted. If in final spin loop of barrier, check if termination condition is satisfied.
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// The work queue may be empty but there might be proxy tasks still executing
if (final_spin && TCR_4(current_task->td_incomplete_child_tasks) == 0)
#else
@ -1970,7 +1970,7 @@ static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gti
return FALSE;
}
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
// We could be getting tasks from target constructs; if this is the only thread, keep trying to execute
// tasks from own queue
if (nthreads == 1)
@ -2383,7 +2383,7 @@ __kmp_allocate_task_team( kmp_info_t *thread, kmp_team_t *team )
}
TCW_4(task_team -> tt.tt_found_tasks, FALSE);
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
TCW_4(task_team -> tt.tt_found_proxy_tasks, FALSE);
#endif
task_team -> tt.tt_nproc = nthreads = team->t.t_nproc;
@ -2551,7 +2551,7 @@ __kmp_task_team_setup( kmp_info_t *this_thr, kmp_team_t *team, int always )
if (!task_team->tt.tt_active || team->t.t_nproc != task_team->tt.tt_nproc) {
TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
TCW_4(task_team->tt.tt_found_tasks, FALSE);
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
#endif
TCW_4(task_team->tt.tt_unfinished_threads, team->t.t_nproc );
@ -2616,7 +2616,7 @@ __kmp_task_team_wait( kmp_info_t *this_thr, kmp_team_t *team
KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
"setting active to false, setting local and team's pointer to NULL\n",
__kmp_gtid_from_thread(this_thr), task_team));
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
KMP_DEBUG_ASSERT( task_team->tt.tt_nproc > 1 || task_team->tt.tt_found_proxy_tasks == TRUE );
TCW_SYNC_4( task_team->tt.tt_found_proxy_tasks, FALSE );
#else
@ -2668,7 +2668,7 @@ __kmp_tasking_barrier( kmp_team_t *team, kmp_info_t *thread, int gtid )
}
-#if OMP_41_ENABLED
+#if OMP_45_ENABLED
/* __kmp_give_task puts a task into a given thread queue if:
- the queue for that thread was created
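Proxy tasks exist so that task completion can be signalled from outside the RTL, through the __kmpc_proxy_task_completed entries exported above. One motivating 4.5 use, sketched under the assumption of an offload-capable toolchain, is asynchronous target execution:

#include <stdio.h>

int main(void) {
    int x = 0;
    /* A target nowait region can be represented host-side as a deferred
       task whose completion arrives from outside the runtime -- the case
       the proxy-task machinery in this file handles. */
    #pragma omp target map(tofrom: x) nowait
    { x = 42; }
    #pragma omp taskwait
    printf("x = %d\n", x);
    return 0;
}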