forked from OSchip/llvm-project
Remove trailing whitespace in src/ directory
This patch doesn't affect D19878's context, so D19878 still applies cleanly. llvm-svn: 270252
This commit is contained in:
parent b21d4e17a2
commit 611184919f
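The change itself is mechanical, so it may help to see how such a cleanup is typically produced. Below is a minimal C++ sketch that strips trailing whitespace from one file; it is illustrative only and is not the tool used for this commit (which is not recorded).

// Minimal sketch: strip trailing whitespace from each line of a file.
// Reads the whole file, trims each line, then rewrites the file in place.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char **argv) {
    if (argc != 2) {
        std::cerr << "usage: " << argv[0] << " <file>\n";
        return 1;
    }
    std::ifstream in(argv[1]);
    if (!in) return 1;
    std::ostringstream out;
    std::string line;
    while (std::getline(in, line)) {
        // Erase trailing spaces and tabs; the newline was consumed by getline.
        std::size_t end = line.find_last_not_of(" \t");
        line.erase(end == std::string::npos ? 0 : end + 1);
        out << line << '\n';
    }
    in.close();
    std::ofstream(argv[1]) << out.str();  // rewrite the file in place
    return 0;
}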
@@ -184,7 +184,7 @@
 %ifdef USE_DEBUGGER
 __kmp_debugging DATA
 __kmp_omp_debug_struct_info DATA
-%endif
+%endif
 
 # Symbols for MS mutual detection:
 _You_must_link_with_exactly_one_OpenMP_library DATA
@@ -305,7 +305,7 @@ typedef void (*ompt_new_parallel_callback_t) (
 typedef void (*ompt_end_parallel_callback_t) (
 ompt_parallel_id_t parallel_id, /* id of parallel region */
 ompt_task_id_t task_id, /* id of task */
-ompt_invoker_t invoker /* who invokes master task? */
+ompt_invoker_t invoker /* who invokes master task? */
 );
 
 /* tasks */
@@ -325,7 +325,7 @@ typedef void (*ompt_new_parallel_callback_t) (
 typedef void (*ompt_end_parallel_callback_t) (
 ompt_parallel_id_t parallel_id, /* id of parallel region */
 ompt_task_id_t task_id, /* id of task */
-ompt_invoker_t invoker /* who invokes master task? */
+ompt_invoker_t invoker /* who invokes master task? */
 );
 
 /* tasks */
@@ -325,7 +325,7 @@ typedef void (*ompt_new_parallel_callback_t) (
 typedef void (*ompt_end_parallel_callback_t) (
 ompt_parallel_id_t parallel_id, /* id of parallel region */
 ompt_task_id_t task_id, /* id of task */
-ompt_invoker_t invoker /* who invokes master task? */
+ompt_invoker_t invoker /* who invokes master task? */
 );
 
 /* tasks */
@@ -218,7 +218,7 @@
 
 // Compiler 12.0 changed alignment of 16 and 32-byte arguments (like _Quad
 // and kmp_cmplx128) on IA-32 architecture. The following aligned structures
-// are implemented to support the old alignment in 10.1, 11.0, 11.1 and
+// are implemented to support the old alignment in 10.1, 11.0, 11.1 and
 // introduce the new alignment in 12.0. See CQ88405.
 #if KMP_ARCH_X86 && KMP_HAVE_QUAD
 
@@ -226,7 +226,7 @@
 
 #pragma pack( push, 4 )
 
 
 struct KMP_DO_ALIGN( 4 ) Quad_a4_t {
 _Quad q;
 
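Context for the hunk above: the Quad_a4_t wrappers exist because #pragma pack changes member alignment, which is what the old-compiler-compatible layout relies on. A small self-contained illustration of the effect (types and names here are invented for the example, not taken from the runtime):

// Illustrative only: how #pragma pack(push, 4) changes struct layout.
#include <cstdio>

struct Unpacked {
    char   c;   // typically followed by 7 bytes of padding on 64-bit ABIs
    double q;   // naturally aligned to 8
};

#pragma pack(push, 4)
struct Packed4 {
    char   c;   // now followed by only 3 bytes of padding
    double q;   // aligned to 4 under the packing pragma
};
#pragma pack(pop)

int main() {
    std::printf("sizeof(Unpacked) = %zu\n", sizeof(Unpacked)); // typically 16
    std::printf("sizeof(Packed4)  = %zu\n", sizeof(Packed4));  // typically 12
    return 0;
}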
@@ -378,7 +378,7 @@ static inline void
 __kmp_acquire_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
 {
 #if OMPT_SUPPORT && OMPT_TRACE
-if (ompt_enabled &&
+if (ompt_enabled &&
 ompt_callbacks.ompt_callback(ompt_event_wait_atomic)) {
 ompt_callbacks.ompt_callback(ompt_event_wait_atomic)(
 (ompt_wait_id_t) lck);
@@ -388,7 +388,7 @@ __kmp_acquire_atomic_lock( kmp_atomic_lock_t *lck, kmp_int32 gtid )
 __kmp_acquire_queuing_lock( lck, gtid );
 
 #if OMPT_SUPPORT && OMPT_TRACE
-if (ompt_enabled &&
+if (ompt_enabled &&
 ompt_callbacks.ompt_callback(ompt_event_acquired_atomic)) {
 ompt_callbacks.ompt_callback(ompt_event_acquired_atomic)(
 (ompt_wait_id_t) lck);
@@ -1395,7 +1395,7 @@ __kmp_join_barrier(int gtid)
 KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
 KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n", gtid, team_id, tid));
 
-#if OMPT_SUPPORT
+#if OMPT_SUPPORT
 #if OMPT_TRACE
 if (ompt_enabled &&
 ompt_callbacks.ompt_callback(ompt_event_barrier_begin)) {
@@ -29,12 +29,12 @@ Request cancellation of the binding OpenMP region.
 */
 kmp_int32 __kmpc_cancel(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
 kmp_info_t *this_thr = __kmp_threads [ gtid ];
 
 KC_TRACE( 10, ("__kmpc_cancel: T#%d request %d OMP_CANCELLATION=%d\n", gtid, cncl_kind, __kmp_omp_cancellation) );
 
 KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
-KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
-cncl_kind == cancel_sections || cncl_kind == cancel_taskgroup);
+KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
+cncl_kind == cancel_sections || cncl_kind == cancel_taskgroup);
 KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);
 
 if (__kmp_omp_cancellation) {
@@ -49,7 +49,7 @@ kmp_int32 __kmpc_cancel(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
 KMP_DEBUG_ASSERT(this_team);
 kmp_int32 old = KMP_COMPARE_AND_STORE_RET32(&(this_team->t.t_cancel_request), cancel_noreq, cncl_kind);
 if (old == cancel_noreq || old == cncl_kind) {
-//printf("__kmpc_cancel: this_team->t.t_cancel_request=%d @ %p\n",
+//printf("__kmpc_cancel: this_team->t.t_cancel_request=%d @ %p\n",
 // this_team->t.t_cancel_request, &(this_team->t.t_cancel_request));
 // we do not have a cancellation request in this team or we do have one
 // that matches the current request -> cancel
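KMP_COMPARE_AND_STORE_RET32 in this hunk is an atomic compare-and-swap that returns the value seen before the attempt, so only the first cancellation request wins the race. A rough standalone analogue using std::atomic (an illustration under assumed names; the runtime uses its own primitives):

// Sketch of the return-old-value compare-and-swap pattern used for cancel requests.
#include <atomic>
#include <cstdio>

enum kmp_cancel_kind { cancel_noreq = 0, cancel_parallel = 1 };

std::atomic<int> cancel_request{cancel_noreq};

// Returns the value the flag held before the attempt, mirroring
// KMP_COMPARE_AND_STORE_RET32's semantics.
int try_request_cancel(int kind) {
    int expected = cancel_noreq;
    // On failure, compare_exchange_strong writes the current value into
    // 'expected'; on success it leaves cancel_noreq there.
    cancel_request.compare_exchange_strong(expected, kind);
    return expected;  // the old value
}

int main() {
    int old = try_request_cancel(cancel_parallel);
    if (old == cancel_noreq || old == cancel_parallel)
        std::puts("no prior request, or a matching one -> cancel");
    return 0;
}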
@@ -61,12 +61,12 @@ kmp_int32 __kmpc_cancel(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
 // cancellation requests for a task group
 // are handled through the taskgroup structure
 {
-kmp_taskdata_t* task;
+kmp_taskdata_t* task;
 kmp_taskgroup_t* taskgroup;
 
 task = this_thr->th.th_current_task;
 KMP_DEBUG_ASSERT( task );
 
 taskgroup = task->td_taskgroup;
 if (taskgroup) {
 kmp_int32 old = KMP_COMPARE_AND_STORE_RET32(&(taskgroup->cancel_request), cancel_noreq, cncl_kind);
@@ -100,7 +100,7 @@ kmp_int32 __kmpc_cancel(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
 @param gtid Global thread ID of encountering thread
 @param cncl_kind Cancellation kind (parallel, for, sections, taskgroup)
 
-@return returns true if a matching cancellation request has been flagged in the RTL and the
+@return returns true if a matching cancellation request has been flagged in the RTL and the
 encountering thread has to cancel..
 
 Cancellation point for the encountering thread.
@@ -111,8 +111,8 @@ kmp_int32 __kmpc_cancellationpoint(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 c
 KC_TRACE( 10, ("__kmpc_cancellationpoint: T#%d request %d OMP_CANCELLATION=%d\n", gtid, cncl_kind, __kmp_omp_cancellation) );
 
 KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
-KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
-cncl_kind == cancel_sections || cncl_kind == cancel_taskgroup);
+KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
+cncl_kind == cancel_sections || cncl_kind == cancel_taskgroup);
 KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);
 
 if (__kmp_omp_cancellation) {
@@ -144,15 +144,15 @@ kmp_int32 __kmpc_cancellationpoint(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 c
 // cancellation requests for a task group
 // are handled through the taskgroup structure
 {
-kmp_taskdata_t* task;
+kmp_taskdata_t* task;
 kmp_taskgroup_t* taskgroup;
 
 task = this_thr->th.th_current_task;
 KMP_DEBUG_ASSERT( task );
 
 taskgroup = task->td_taskgroup;
 if (taskgroup) {
-// return the current status of cancellation for the
+// return the current status of cancellation for the
 // taskgroup
 return !!taskgroup->cancel_request;
 }
@@ -178,11 +178,11 @@ kmp_int32 __kmpc_cancellationpoint(ident_t* loc_ref, kmp_int32 gtid, kmp_int32 c
 @param loc_ref location of the original task directive
 @param gtid Global thread ID of encountering thread
 
-@return returns true if a matching cancellation request has been flagged in the RTL and the
+@return returns true if a matching cancellation request has been flagged in the RTL and the
 encountering thread has to cancel..
 
 Barrier with cancellation point to send threads from the barrier to the
-end of the parallel region. Needs a special code pattern as documented
+end of the parallel region. Needs a special code pattern as documented
 in the design document for the cancellation feature.
 */
 kmp_int32
@@ -208,7 +208,7 @@ __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) {
 __kmpc_barrier(loc, gtid);
 this_team->t.t_cancel_request = cancel_noreq;
 // the next barrier is the fork/join barrier, which
-// synchronizes the threads leaving here
+// synchronizes the threads leaving here
 break;
 case cancel_loop:
 case cancel_sections:
@@ -233,7 +233,7 @@ __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) {
 KMP_ASSERT ( 0 /* false */);
 }
 }
 
 return ret;
 }
 
@@ -242,21 +242,21 @@ __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) {
 @param loc_ref location of the original task directive
 @param gtid Global thread ID of encountering thread
 
-@return returns true if a matching cancellation request has been flagged in the RTL and the
+@return returns true if a matching cancellation request has been flagged in the RTL and the
 encountering thread has to cancel..
 
 Query function to query the current status of cancellation requests.
 Can be used to implement the following pattern:
 
 if (kmp_get_cancellation_status(kmp_cancel_parallel)) {
 perform_cleanup();
-#pragma omp cancellation point parallel
+#pragma omp cancellation point parallel
 }
 */
 int __kmp_get_cancellation_status(int cancel_kind) {
 if (__kmp_omp_cancellation) {
 kmp_info_t *this_thr = __kmp_entry_thread();
 
 switch (cancel_kind) {
 case cancel_parallel:
 case cancel_loop:
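The usage pattern quoted in that doc comment depends on OpenMP cancellation being active. A hedged, self-contained illustration using only the standard pragmas (kmp_get_cancellation_status itself is a runtime extension, so it is not used here; run with OMP_CANCELLATION=true and compile with -fopenmp):

// Sketch: cooperative cancellation of a parallel region.
#include <cstdio>
#include <omp.h>

int main() {
    #pragma omp parallel
    {
        for (int i = 0; i < 1000; ++i) {
            if (i == 10 && omp_get_thread_num() == 0) {
                // One thread requests cancellation of the whole region.
                #pragma omp cancel parallel
            }
            // Every thread polls here and proceeds to the end of the
            // region once a cancellation request is observed.
            #pragma omp cancellation point parallel
        }
    }
    std::printf("parallel region finished or was cancelled\n");
    return 0;
}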
@@ -267,7 +267,7 @@ int __kmp_get_cancellation_status(int cancel_kind) {
 }
 case cancel_taskgroup:
 {
-kmp_taskdata_t* task;
+kmp_taskdata_t* task;
 kmp_taskgroup_t* taskgroup;
 task = this_thr->th.th_current_task;
 taskgroup = task->td_taskgroup;
@@ -283,7 +283,7 @@ __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...)
 {
 int gtid = __kmp_entry_gtid();
 
-#if (KMP_STATS_ENABLED)
+#if (KMP_STATS_ENABLED)
 int inParallel = __kmpc_in_parallel(loc);
 if (inParallel)
 {
@@ -620,7 +620,7 @@ __kmpc_flush(ident_t *loc)
 if ( ! __kmp_cpuinfo.sse2 ) {
 // CPU cannot execute SSE2 instructions.
 } else {
-#if KMP_COMPILER_ICC
+#if KMP_COMPILER_ICC
 _mm_mfence();
 #elif KMP_COMPILER_MSVC
 MemoryBarrier();
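For readers following the __kmpc_flush hunk: _mm_mfence() (ICC/x86 intrinsic) and MemoryBarrier() (MSVC) are both full memory fences. A portable sketch of the equivalent effect in standard C++ (illustrative; not what the runtime compiles):

// Sketch: a full memory fence, the portable analogue of the
// compiler-specific barriers selected in __kmpc_flush.
#include <atomic>

void flush_equivalent() {
    // Sequentially consistent fence: no loads or stores may be
    // reordered across this point.
    std::atomic_thread_fence(std::memory_order_seq_cst);
}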
@@ -42,7 +42,7 @@
 
 #define __KMP_BUILD_ASSERT( expr, suffix ) typedef char __kmp_build_check_##suffix[ (expr) ? 1 : -1 ]
 #define _KMP_BUILD_ASSERT( expr, suffix ) __KMP_BUILD_ASSERT( (expr), suffix )
-#ifdef KMP_USE_ASSERT
+#ifdef KMP_USE_ASSERT
 #define KMP_BUILD_ASSERT( expr ) _KMP_BUILD_ASSERT( (expr), __LINE__ )
 #else
 #define KMP_BUILD_ASSERT( expr ) /* nothing to do */
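The macros in this hunk are the classic negative-array-size compile-time assertion, with a two-level expansion so that __LINE__ produces a unique typedef name. A minimal standalone version of the same trick (names invented for the example):

// Sketch of the negative-array-size build assert behind KMP_BUILD_ASSERT.
// A false condition yields "char check[-1]", which fails to compile.
#define MY_BUILD_ASSERT_IMPL(expr, suffix) \
    typedef char build_check_##suffix[(expr) ? 1 : -1]
#define MY_BUILD_ASSERT_LINE(expr, line) MY_BUILD_ASSERT_IMPL(expr, line)
#define MY_BUILD_ASSERT(expr) MY_BUILD_ASSERT_LINE(expr, __LINE__)

MY_BUILD_ASSERT(sizeof(int) >= 4);   // compiles
// MY_BUILD_ASSERT(sizeof(int) == 1); // would fail at compile time

int main() { return 0; }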
@@ -23,19 +23,19 @@
 
 /* * This external variable can be set by any debugger to flag to the runtime that we
 are currently executing inside a debugger. This will allow the debugger to override
-the number of threads spawned in a parallel region by using __kmp_omp_num_threads() (below).
-* When __kmp_debugging is TRUE, each team and each task gets a unique integer identifier
+the number of threads spawned in a parallel region by using __kmp_omp_num_threads() (below).
+* When __kmp_debugging is TRUE, each team and each task gets a unique integer identifier
 that can be used by debugger to conveniently identify teams and tasks.
 * The debugger has access to __kmp_omp_debug_struct_info which contains information
-about the OpenMP library's important internal structures. This access will allow the debugger
+about the OpenMP library's important internal structures. This access will allow the debugger
 to read detailed information from the typical OpenMP constructs (teams, threads, tasking, etc. )
 during a debugging session and offer detailed and useful information which the user can probe
 about the OpenMP portion of their code.
 */
 extern int __kmp_debugging; /* Boolean whether currently debugging OpenMP RTL */
 // Return number of threads specified by the debugger for given parallel region.
-/* The ident field, which represents a source file location, is used to check if the
-debugger has changed the number of threads for the parallel region at source file
+/* The ident field, which represents a source file location, is used to check if the
+debugger has changed the number of threads for the parallel region at source file
 location ident. This way, specific parallel regions' number of threads can be changed
 at the debugger's request.
 */
@@ -23,17 +23,17 @@
 ------------------------------------------------------------------------------------------------
 On Windows* OS, there are two environments (at least, see below):
 
-1. Environment maintained by Windows* OS on IA-32 architecture.
+1. Environment maintained by Windows* OS on IA-32 architecture.
 Accessible through GetEnvironmentVariable(),
 SetEnvironmentVariable(), and GetEnvironmentStrings().
 
 2. Environment maintained by C RTL. Accessible through getenv(), putenv().
 
-putenv() function updates both C and Windows* OS on IA-32 architecture. getenv() function
-search for variables in C RTL environment only. Windows* OS on IA-32 architecture functions work *only*
+putenv() function updates both C and Windows* OS on IA-32 architecture. getenv() function
+search for variables in C RTL environment only. Windows* OS on IA-32 architecture functions work *only*
 with Windows* OS on IA-32 architecture.
 
-Windows* OS on IA-32 architecture maintained by OS, so there is always only one Windows* OS on
+Windows* OS on IA-32 architecture maintained by OS, so there is always only one Windows* OS on
 IA-32 architecture per process. Changes in Windows* OS on IA-32 architecture are process-visible.
 
 C environment maintained by C RTL. Multiple copies of C RTL may be present in the process, and
@@ -42,11 +42,11 @@
 Thus, proper way to work with environment on Windows* OS is:
 
 1. Set variables with putenv() function -- both C and Windows* OS on
-IA-32 architecture are being updated. Windows* OS on
+IA-32 architecture are being updated. Windows* OS on
 IA-32 architecture may be considered as primary target,
 while updating C RTL environment is a free bonus.
 
-2. Get variables with GetEnvironmentVariable() -- getenv() does not
+2. Get variables with GetEnvironmentVariable() -- getenv() does not
 search Windows* OS on IA-32 architecture, and can not see variables
 set with SetEnvironmentVariable().
 
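A short Windows-only sketch of the discipline that comment recommends, writing through the C RTL and reading through the Win32 API (the variable name is made up and error handling is minimal):

// Windows-only sketch: _putenv() updates both the C RTL and the Win32
// environment; GetEnvironmentVariableA() reads the Win32 environment,
// which also sees SetEnvironmentVariable() values that getenv() misses.
#include <windows.h>
#include <cstdio>
#include <cstdlib>

int main() {
    _putenv("MY_DEMO_VAR=42");  // updates both environments

    char buf[64];
    DWORD n = GetEnvironmentVariableA("MY_DEMO_VAR", buf, sizeof(buf));
    if (n > 0 && n < sizeof(buf))
        std::printf("MY_DEMO_VAR=%s\n", buf);
    return 0;
}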
@@ -214,7 +214,7 @@ __kmp_env_set( char const * name, char const * value, int overwrite ) {
 // Dead code. I tried to put too many variables into Linux* OS
 // environment on IA-32 architecture. When application consumes
 // more than ~2.5 GB of memory, entire system feels bad. Sometimes
-// application is killed (by OS?), sometimes system stops
+// application is killed (by OS?), sometimes system stops
 // responding... But this error message never appears. --ln
 __kmp_msg(
 kmp_ms_fatal,
@@ -518,7 +518,7 @@
 // All GOMP_3.0 symbols
 #define KMP_API_NAME_GOMP_TASKYIELD GOMP_taskyield
 
-// All GOMP_4.0 symbols
+// All GOMP_4.0 symbols
 // TODO: As of 2013-10-14, none of the GOMP_4.0 functions are implemented in libomp
 #define KMP_API_NAME_GOMP_BARRIER_CANCEL GOMP_barrier_cancel
 #define KMP_API_NAME_GOMP_CANCEL GOMP_cancel
@@ -364,7 +364,7 @@ __kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), mic
 __kmp_run_before_invoked_task(gtid, tid, thr, team);
 }
 
-#if OMPT_SUPPORT
+#if OMPT_SUPPORT
 if (ompt_enabled) {
 #if OMPT_TRACE
 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
@@ -493,20 +493,20 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
 serialized_task_id = task_info->task_id;
 
 // Record that we re-entered the runtime system in the implicit
-// task frame representing the parallel region.
+// task frame representing the parallel region.
 ompt_frame = &task_info->frame;
 ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
 
 // unlink if necessary. no-op if there is not a lightweight task.
 ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
 // GOMP allocates/frees lwt since it can't be kept on the stack
-if (lwt) {
+if (lwt) {
 __kmp_free(lwt);
 
 #if OMPT_SUPPORT
 if (ompt_enabled) {
 // Since a lightweight task was destroyed, make sure that the
-// remaining deepest task knows the stack frame where the runtime
+// remaining deepest task knows the stack frame where the runtime
 // was reentered.
 ompt_frame = __ompt_get_task_frame_internal(0);
 ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
@@ -553,14 +553,14 @@ xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
 
 #if OMPT_SUPPORT
 if (ompt_enabled) {
-// Record that we re-entered the runtime system in the frame that
+// Record that we re-entered the runtime system in the frame that
 // created the parallel region.
 ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
 
 if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
 ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
 ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
-parallel_id, task_info->task_id,
+parallel_id, task_info->task_id,
 OMPT_INVOKER(fork_context_gnu));
 }
 
@@ -909,20 +909,20 @@ LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
 
 #else
 
-#define OMPT_LOOP_PRE()
+#define OMPT_LOOP_PRE()
 
 #define OMPT_LOOP_POST()
 
 #endif
 
 
-PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
+PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
 kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
-PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
+PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
 kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
-PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
+PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
 kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
-PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
+PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
 kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)
 
@@ -43,7 +43,7 @@ kmp_bootstrap_lock_t __kmp_console_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_
 
 #if KMP_OS_WINDOWS
 
-# ifdef KMP_DEBUG
+# ifdef KMP_DEBUG
 /* __kmp_stdout is used only for dev build */
 static HANDLE __kmp_stdout = NULL;
 # endif
@@ -77,7 +77,7 @@ kmp_bootstrap_lock_t __kmp_console_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_
 /* wait until user presses return before closing window */
 /* TODO only close if a window was opened */
 if( __kmp_console_exists ) {
-#ifdef KMP_DEBUG
+#ifdef KMP_DEBUG
 /* standard out is used only in dev build */
 __kmp_stdout = NULL;
 #endif
@@ -95,7 +95,7 @@ kmp_bootstrap_lock_t __kmp_console_lock = KMP_BOOTSTRAP_LOCK_INITIALIZER( __kmp_
 __kmp_acquire_bootstrap_lock( &__kmp_console_lock );
 
 if( ! __kmp_console_exists ) {
-#ifdef KMP_DEBUG
+#ifdef KMP_DEBUG
 /* standard out is used only in dev build */
 HANDLE ho;
 #endif
@@ -3394,7 +3394,7 @@ __kmp_lookup_indirect_lock(void **user_lock, const char *func)
 if (lck == NULL) {
 KMP_FATAL(LockIsUninitialized, func);
 }
-return lck;
+return lck;
 } else {
 if (OMP_LOCK_T_SIZE < sizeof(void *)) {
 return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
@@ -3544,7 +3544,7 @@ __kmp_init_dynamic_user_locks()
 __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
 __kmp_i_lock_table.table = (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
 *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)
-__kmp_allocate(KMP_I_LOCK_CHUNK*sizeof(kmp_indirect_lock_t));
+__kmp_allocate(KMP_I_LOCK_CHUNK*sizeof(kmp_indirect_lock_t));
 __kmp_i_lock_table.next = 0;
 
 // Indirect lock size
@@ -1200,7 +1200,7 @@ extern kmp_indirect_lock_t * __kmp_allocate_indirect_lock(void **, kmp_int32, km
 // Cleans up global states and data structures for managing dynamic user locks.
 extern void __kmp_cleanup_indirect_user_locks();
 
-// Default user lock sequence when not using hinted locks.
+// Default user lock sequence when not using hinted locks.
 extern kmp_dyna_lockseq_t __kmp_user_lock_seq;
 
 // Jump table for "set lock location", available only for indirect locks.
@@ -5488,7 +5488,7 @@ __kmp_launch_thread( kmp_info_t *this_thr )
 task_info->frame.exit_runtime_frame = 0;
 task_info->task_id = 0;
 }
-#endif
+#endif
 }
 }
 TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
@@ -6842,14 +6842,14 @@ __kmp_teams_master( int gtid )
 #if INCLUDE_SSC_MARKS
 SSC_MARK_JOINING();
 #endif
 
 // AC: last parameter "1" eliminates join barrier which won't work because
 // worker threads are in a fork barrier waiting for more parallel regions
 __kmp_join_call( loc, gtid
 #if OMPT_SUPPORT
 , fork_context_intel
 #endif
-, 1 );
+, 1 );
 }
 
 int
@@ -50,7 +50,7 @@
 # define KMP_MEMCPY_S(dst, bsz, src, cnt) memcpy(dst, src, cnt)
 # define KMP_SNPRINTF snprintf
 # define KMP_SSCANF sscanf
-# define KMP_STRCPY_S(dst, bsz, src) strcpy(dst, src)
+# define KMP_STRCPY_S(dst, bsz, src) strcpy(dst, src)
 # define KMP_STRNCPY_S(dst, bsz, src, cnt) strncpy(dst, src, cnt)
 # define KMP_VSNPRINTF vsnprintf
 # define KMP_STRNCPY strncpy
@@ -97,8 +97,8 @@ __kmp_for_static_init(
 register kmp_info_t *th = __kmp_threads[ gtid ];
 
 #if OMPT_SUPPORT && OMPT_TRACE
-ompt_team_info_t *team_info = NULL;
-ompt_task_info_t *task_info = NULL;
+ompt_team_info_t *team_info = NULL;
+ompt_task_info_t *task_info = NULL;
 
 if (ompt_enabled) {
 // Only fully initialize variables needed by OMPT if OMPT is enabled.
@@ -239,7 +239,7 @@ __kmp_process_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
 kmp_task_t *task )
 {
 KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d depencies : dep_barrier = %d\n", filter, gtid, ndeps, dep_barrier ) );
 
 kmp_info_t *thread = __kmp_threads[ gtid ];
 kmp_int32 npredecessors=0;
 for ( kmp_int32 i = 0; i < ndeps ; i++ ) {
@@ -276,9 +276,9 @@ __kmp_process_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
 if ( last_out->dn.task ) {
 __kmp_track_dependence(last_out,node,task);
 last_out->dn.successors = __kmp_add_node(thread, last_out->dn.successors, node);
-KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
+KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
 filter,gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task), KMP_TASK_TO_TASKDATA(node->dn.task)));
 
 npredecessors++;
 }
 KMP_RELEASE_DEPNODE(gtid,last_out);
@@ -373,7 +373,7 @@ __kmp_release_deps ( kmp_int32 gtid, kmp_taskdata_t *task )
 if ( !node ) return;
 
 KA_TRACE(20, ("__kmp_realease_deps: T#%d notifying succesors of task %p.\n", gtid, task ) );
 
 KMP_ACQUIRE_DEPNODE(gtid,node);
 node->dn.task = NULL; // mark this task as finished, so no new dependencies are generated
 KMP_RELEASE_DEPNODE(gtid,node);
@@ -386,7 +386,7 @@ __kmp_release_deps ( kmp_int32 gtid, kmp_taskdata_t *task )
 // successor task can be NULL for wait_depends or because deps are still being processed
 if ( npredecessors == 0 ) {
 KMP_MB();
-if ( successor->dn.task ) {
+if ( successor->dn.task ) {
 KA_TRACE(20, ("__kmp_realease_deps: T#%d successor %p of %p scheduled for execution.\n", gtid, successor->dn.task, task ) );
 __kmp_omp_task(gtid,successor->dn.task,false);
 }
@@ -1057,9 +1057,9 @@ __kmp_task_alloc( ident_t *loc_ref, kmp_int32 gtid, kmp_tasking_flags_t *flags,
 
 // Only need to keep track of child task counts if team parallel and tasking not serialized or if it is a proxy task
 #if OMP_41_ENABLED
-if ( flags->proxy == TASK_PROXY || !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) )
+if ( flags->proxy == TASK_PROXY || !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) )
 #else
-if ( !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) )
+if ( !( taskdata -> td_flags.team_serial || taskdata -> td_flags.tasking_ser ) )
 #endif
 {
 KMP_TEST_THEN_INC32( (kmp_int32 *)(& parent_task->td_incomplete_child_tasks) );
@@ -1406,12 +1406,12 @@ __kmpc_omp_taskwait( ident_t *loc_ref, kmp_int32 gtid )
 #if OMPT_SUPPORT && OMPT_TRACE
 ompt_task_id_t my_task_id;
 ompt_parallel_id_t my_parallel_id;
 
 if (ompt_enabled) {
 kmp_team_t *team = thread->th.th_team;
 my_task_id = taskdata->ompt_task_info.task_id;
 my_parallel_id = team->t.ompt_team_info.parallel_id;
 
 taskdata->ompt_task_info.frame.reenter_runtime_frame = __builtin_frame_address(0);
 if (ompt_callbacks.ompt_callback(ompt_event_taskwait_begin)) {
 ompt_callbacks.ompt_callback(ompt_event_taskwait_begin)(
@@ -1434,9 +1434,9 @@ __kmpc_omp_taskwait( ident_t *loc_ref, kmp_int32 gtid )
 #endif /* USE_ITT_BUILD */
 
 #if OMP_41_ENABLED
-if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) )
+if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) )
 #else
-if ( ! taskdata->td_flags.team_serial )
+if ( ! taskdata->td_flags.team_serial )
 #endif
 {
 // GEH: if team serialized, avoid reading the volatile variable below.
@@ -1575,9 +1575,9 @@ __kmpc_end_taskgroup( ident_t* loc, int gtid )
 #endif /* USE_ITT_BUILD */
 
 #if OMP_41_ENABLED
-if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) )
+if ( ! taskdata->td_flags.team_serial || (thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks) )
 #else
-if ( ! taskdata->td_flags.team_serial )
+if ( ! taskdata->td_flags.team_serial )
 #endif
 {
 kmp_flag_32 flag(&(taskgroup->count), 0U);
@@ -1800,7 +1800,7 @@ __kmp_steal_task( kmp_info_t *victim, kmp_int32 gtid, kmp_task_team_t *task_team
 // spinner == NULL means only execute a single task and return.
 // checker is the value to check to terminate the spin.
 template <class C>
-static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
+static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
 int *thread_finished
 USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained)
 {
@@ -1867,9 +1867,9 @@ static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gti
 // of the barrier, check and see if the termination condition is satisfied.
 #if OMP_41_ENABLED
 // The work queue may be empty but there might be proxy tasks still executing
-if (final_spin && TCR_4(current_task -> td_incomplete_child_tasks) == 0)
+if (final_spin && TCR_4(current_task -> td_incomplete_child_tasks) == 0)
 #else
-if (final_spin)
+if (final_spin)
 #endif
 {
 // First, decrement the #unfinished threads, if that has not already
@@ -1952,9 +1952,9 @@ static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gti
 // of the barrier, check and see if the termination condition is satisfied.
 #if OMP_41_ENABLED
 // The work queue may be empty but there might be proxy tasks still executing
-if (final_spin && TCR_4(current_task -> td_incomplete_child_tasks) == 0)
+if (final_spin && TCR_4(current_task -> td_incomplete_child_tasks) == 0)
 #else
-if (final_spin)
+if (final_spin)
 #endif
 {
 // First, decrement the #unfinished threads, if that has not already
@@ -2069,9 +2069,9 @@ static inline int __kmp_execute_tasks_template(kmp_info_t *thread, kmp_int32 gti
 // termination condition before doing that.
 #if OMP_41_ENABLED
 // The work queue may be empty but there might be proxy tasks still executing
-if (final_spin && TCR_4(current_task -> td_incomplete_child_tasks) == 0)
+if (final_spin && TCR_4(current_task -> td_incomplete_child_tasks) == 0)
 #else
-if (final_spin)
+if (final_spin)
 #endif
 {
 // First, decrement the #unfinished threads, if that has not already
@@ -2619,17 +2619,17 @@ __kmp_task_team_setup( kmp_info_t *this_thr, kmp_team_t *team, int always )
 
 // If this task_team hasn't been created yet, allocate it. It will be used in the region after the next.
 // If it exists, it is the current task team and shouldn't be touched yet as it may still be in use.
-if (team->t.t_task_team[this_thr->th.th_task_state] == NULL && (always || team->t.t_nproc > 1) ) {
+if (team->t.t_task_team[this_thr->th.th_task_state] == NULL && (always || team->t.t_nproc > 1) ) {
 team->t.t_task_team[this_thr->th.th_task_state] = __kmp_allocate_task_team( this_thr, team );
 KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p for team %d at parity=%d\n",
 __kmp_gtid_from_thread(this_thr), team->t.t_task_team[this_thr->th.th_task_state],
 ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
 }
 
-// After threads exit the release, they will call sync, and then point to this other task_team; make sure it is
+// After threads exit the release, they will call sync, and then point to this other task_team; make sure it is
 // allocated and properly initialized. As threads spin in the barrier release phase, they will continue to use the
 // previous task_team struct(above), until they receive the signal to stop checking for tasks (they can't safely
-// reference the kmp_team_t struct, which could be reallocated by the master thread). No task teams are formed for
+// reference the kmp_team_t struct, which could be reallocated by the master thread). No task teams are formed for
 // serialized teams.
 if (team->t.t_nproc > 1) {
 int other_team = 1 - this_thr->th.th_task_state;
@@ -519,11 +519,11 @@ kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_si
 /*!
 @ingroup THREADPRIVATE
 
-@param loc source location information
-@param data pointer to data being privatized
-@param ctor pointer to constructor function for data
-@param cctor pointer to copy constructor function for data
-@param dtor pointer to destructor function for data
+@param loc source location information
+@param data pointer to data being privatized
+@param ctor pointer to constructor function for data
+@param cctor pointer to copy constructor function for data
+@param dtor pointer to destructor function for data
 
 Register constructors and destructors for thread private data.
 This function is called when executing in parallel, when we know the thread id.
@@ -617,14 +617,14 @@ __kmpc_threadprivate(ident_t *loc, kmp_int32 global_tid, void *data, size_t size
 
 /*!
 @ingroup THREADPRIVATE
-@param loc source location information
-@param global_tid global thread number
-@param data pointer to data to privatize
-@param size size of data to privatize
-@param cache pointer to cache
-@return pointer to private storage
+@param loc source location information
+@param global_tid global thread number
+@param data pointer to data to privatize
+@param size size of data to privatize
+@param cache pointer to cache
+@return pointer to private storage
 
-Allocate private storage for threadprivate data.
+Allocate private storage for threadprivate data.
 */
 void *
 __kmpc_threadprivate_cached(
@@ -653,7 +653,7 @@ __kmpc_threadprivate_cached(
 // No need to zero the allocated memory; __kmp_allocate does that.
 KC_TRACE( 50, ("__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
 global_tid, my_cache ) );
 
 /* TODO: free all this memory in __kmp_common_destroy using __kmp_threadpriv_cache_list */
 /* Add address of mycache to linked list for cleanup later */
 kmp_cached_addr_t *tp_cache_addr;
@@ -687,11 +687,11 @@ __kmpc_threadprivate_cached(
 
 /*!
 @ingroup THREADPRIVATE
-@param loc source location information
-@param data pointer to data being privatized
-@param ctor pointer to constructor function for data
-@param cctor pointer to copy constructor function for data
-@param dtor pointer to destructor function for data
+@param loc source location information
+@param data pointer to data being privatized
+@param ctor pointer to constructor function for data
+@param cctor pointer to copy constructor function for data
+@param dtor pointer to destructor function for data
 @param vector_length length of the vector (bytes or elements?)
 Register vector constructors and destructors for thread private data.
 */
@@ -33,7 +33,7 @@ to build higher level operations such as barriers and fork/join.
 @{
 */
 
-/*!
+/*!
 * The flag_type describes the storage used for the flag.
 */
 enum flag_type {
@@ -365,16 +365,16 @@ class kmp_basic_flag : public kmp_flag<FlagType> {
 public:
 kmp_basic_flag(volatile FlagType *p) : kmp_flag<FlagType>(p, traits_type::t), num_waiting_threads(0) {}
 kmp_basic_flag(volatile FlagType *p, kmp_info_t *thr) : kmp_flag<FlagType>(p, traits_type::t), num_waiting_threads(1) {
-waiting_threads[0] = thr;
+waiting_threads[0] = thr;
 }
 kmp_basic_flag(volatile FlagType *p, FlagType c) : kmp_flag<FlagType>(p, traits_type::t), checker(c), num_waiting_threads(0) {}
 /*!
 * param i in index into waiting_threads
 * @result the thread that is waiting at index i
 */
-kmp_info_t * get_waiter(kmp_uint32 i) {
+kmp_info_t * get_waiter(kmp_uint32 i) {
 KMP_DEBUG_ASSERT(i<num_waiting_threads);
-return waiting_threads[i];
+return waiting_threads[i];
 }
 /*!
 * @result num_waiting_threads
@@ -385,8 +385,8 @@ class kmp_basic_flag : public kmp_flag<FlagType> {
 *
 * Insert a waiting thread at index 0.
 */
-void set_waiter(kmp_info_t *thr) {
-waiting_threads[0] = thr;
+void set_waiter(kmp_info_t *thr) {
+waiting_threads[0] = thr;
 num_waiting_threads = 1;
 }
 /*!
@@ -417,22 +417,22 @@ class kmp_basic_flag : public kmp_flag<FlagType> {
 * @result Actual flag value before sleep bit(s) set.
 * Notes that there is at least one thread sleeping on the flag by setting sleep bit(s).
 */
-FlagType set_sleeping() {
+FlagType set_sleeping() {
 return traits_type::test_then_or((volatile FlagType *)this->get(), KMP_BARRIER_SLEEP_STATE);
 }
 /*!
 * @result Actual flag value before sleep bit(s) cleared.
 * Notes that there are no longer threads sleeping on the flag by clearing sleep bit(s).
 */
-FlagType unset_sleeping() {
+FlagType unset_sleeping() {
 return traits_type::test_then_and((volatile FlagType *)this->get(), ~KMP_BARRIER_SLEEP_STATE);
 }
-/*!
+/*!
 * @param old_loc in old value of flag
 * Test whether there are threads sleeping on the flag's old value in old_loc.
 */
 bool is_sleeping_val(FlagType old_loc) { return old_loc & KMP_BARRIER_SLEEP_STATE; }
-/*!
+/*!
 * Test whether there are threads sleeping on the flag.
 */
 bool is_sleeping() { return is_sleeping_val(*(this->get())); }
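set_sleeping()/unset_sleeping() above multiplex a "some thread is asleep on this flag" bit into the flag word, using fetch-or/fetch-and primitives that return the prior value. A rough standalone sketch of the same idea (the bit position and all names are invented here):

// Sketch of a sleep bit folded into a synchronization flag.
#include <atomic>
#include <cstdint>
#include <cstdio>

constexpr std::uint32_t SLEEP_BIT = 1u << 31;  // analogous to KMP_BARRIER_SLEEP_STATE

std::atomic<std::uint32_t> flag{0};

// Returns the flag value before the sleep bit was set, like set_sleeping().
std::uint32_t set_sleeping()   { return flag.fetch_or(SLEEP_BIT); }

// Returns the flag value before the sleep bit was cleared, like unset_sleeping().
std::uint32_t unset_sleeping() { return flag.fetch_and(~SLEEP_BIT); }

bool is_sleeping_val(std::uint32_t v) { return (v & SLEEP_BIT) != 0; }

int main() {
    std::uint32_t old = set_sleeping();
    std::printf("was sleeping before: %d, now: %d\n",
                is_sleeping_val(old), is_sleeping_val(flag.load()));
    unset_sleeping();
    return 0;
}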
@@ -54,7 +54,7 @@ enum tool_setting_e {
 
 
 typedef void (*ompt_initialize_t) (
-ompt_function_lookup_t ompt_fn_lookup,
+ompt_function_lookup_t ompt_fn_lookup,
 const char *version,
 unsigned int ompt_version
 );
@@ -101,7 +101,7 @@ OMPT_API_ROUTINE ompt_thread_id_t ompt_get_thread_id(void);
 * found, ompt_tool's return value is used to initialize the tool. Otherwise,
 * NULL is returned and OMPT won't be enabled */
 #if OMPT_HAVE_WEAK_ATTRIBUTE
-_OMP_EXTERN
+_OMP_EXTERN
 __attribute__ (( weak ))
 ompt_initialize_t ompt_tool()
 {
@@ -247,7 +247,7 @@ void ompt_post_init()
 // Initialize the tool if so indicated.
 //--------------------------------------------------
 if (ompt_enabled) {
-ompt_initialize_fn(ompt_fn_lookup, ompt_get_runtime_version(),
+ompt_initialize_fn(ompt_fn_lookup, ompt_get_runtime_version(),
 OMPT_VERSION);
 
 ompt_thread_t *root_thread = ompt_get_thread();
@@ -432,8 +432,8 @@ OMPT_API_ROUTINE void *ompt_get_task_function(int depth)
 ****************************************************************************/
 
 // Don't define this as static. The loader may choose to eliminate the symbol
-// even though it is needed by tools.
-#define OMPT_API_PLACEHOLDER
+// even though it is needed by tools.
+#define OMPT_API_PLACEHOLDER
 
 // Ensure that placeholders don't have mangled names in the symbol table.
 #ifdef __cplusplus
@@ -441,7 +441,7 @@ extern "C" {
 #endif
 
 
-OMPT_API_PLACEHOLDER void ompt_idle(void)
+OMPT_API_PLACEHOLDER void ompt_idle(void)
 {
 // This function is a placeholder used to represent the calling context of
 // idle OpenMP worker threads. It is not meant to be invoked.
@@ -74,10 +74,10 @@ ompt_get_thread()
 }
 
 
-inline void
+inline void
 ompt_set_thread_state(ompt_thread_t *thread, ompt_state_t state)
 {
-thread->th.ompt_thread_info.state = state;
+thread->th.ompt_thread_info.state = state;
 }
 
 
@@ -140,7 +140,7 @@ The same ID may not be reused for different instances, unless a previous
 # if ITT_PLATFORM==ITT_PLATFORM_WIN
 # define CDECL __cdecl
 # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-# if defined _M_IX86 || defined __i386__
+# if defined _M_IX86 || defined __i386__
 # define CDECL __attribute__ ((cdecl))
 # else /* _M_IX86 || __i386__ */
 # define CDECL /* actual only on x86 platform */
@@ -153,7 +153,7 @@ The same ID may not be reused for different instances, unless a previous
 # define STDCALL __stdcall
 # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 # if defined _M_IX86 || defined __i386__
-# define STDCALL __attribute__ ((stdcall))
+# define STDCALL __attribute__ ((stdcall))
 # else /* _M_IX86 || __i386__ */
 # define STDCALL /* supported only on x86 platform */
 # endif /* _M_IX86 || __i386__ */
@@ -416,19 +416,19 @@ ITT_STUBV(ITTAPI, void, thread_ignore, (void))
 *********************************************************************/
 /** @{ */
 /**
-* @hideinitializer
+* @hideinitializer
 * @brief possible value for suppression mask
 */
 #define __itt_suppress_all_errors 0x7fffffff
 
 /**
-* @hideinitializer
+* @hideinitializer
 * @brief possible value for suppression mask (suppresses errors from threading analysis)
 */
 #define __itt_suppress_threading_errors 0x000000ff
 
 /**
-* @hideinitializer
+* @hideinitializer
 * @brief possible value for suppression mask (suppresses errors from memory analysis)
 */
 #define __itt_suppress_memory_errors 0x0000ff00
@@ -454,7 +454,7 @@ ITT_STUBV(ITTAPI, void, suppress_push, (unsigned int mask))
 /** @endcond */
 
 /**
-* @brief Undo the effects of the matching call to __itt_suppress_push
+* @brief Undo the effects of the matching call to __itt_suppress_push
 */
 void ITTAPI __itt_suppress_pop(void);
 
@@ -1584,13 +1584,13 @@ ITT_STUBV(ITTAPI, void, heap_record_memory_growth_end, (void))
 * @brief Specify the type of heap detection/reporting to modify.
 */
 /**
-* @hideinitializer
+* @hideinitializer
 * @brief Report on memory leaks.
 */
 #define __itt_heap_leaks 0x00000001
 
 /**
-* @hideinitializer
+* @hideinitializer
 * @brief Report on memory growth.
 */
 #define __itt_heap_growth 0x00000002
@@ -1667,7 +1667,7 @@ typedef struct ___itt_domain
 * @ingroup domains
 * @brief Create a domain.
 * Create domain using some domain name: the URI naming style is recommended.
-* Because the set of domains is expected to be static over the application's
+* Because the set of domains is expected to be static over the application's
 * execution time, there is no mechanism to destroy a domain.
 * Any domain can be accessed by any thread in the process, regardless of
 * which thread created the domain. This call is thread-safe.
@@ -1801,7 +1801,7 @@ ITT_STUBV(ITTAPI, void, id_create, (const __itt_domain *domain, __itt_id id))
 * @brief Destroy an instance of identifier.
 * This ends the lifetime of the current instance of the given ID value in the trace.
 * Any relationships that are established after this lifetime ends are invalid.
-* This call must be performed before the given ID value can be reused for a different
+* This call must be performed before the given ID value can be reused for a different
 * named entity instance.
 * @param[in] domain The domain controlling the execution of this call.
 * @param[in] id The ID to destroy.
@@ -2360,7 +2360,7 @@ ITT_STUBV(ITTAPI, void, metadata_add, (const __itt_domain *domain, __itt_id id,
 * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task
 * @param[in] key The name of the metadata
 * @param[in] data The metadata itself
-* @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated
+* @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated
 */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
 void ITTAPI __itt_metadata_str_addA(const __itt_domain *domain, __itt_id id, __itt_string_handle *key, const char *data, size_t length);
@@ -2396,9 +2396,9 @@ ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #else /* INTEL_NO_ITTNOTIFY_API */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_metadata_str_addA(d,x,y,z,a)
+#define __itt_metadata_str_addA(d,x,y,z,a)
 #define __itt_metadata_str_addA_ptr 0
-#define __itt_metadata_str_addW(d,x,y,z,a)
+#define __itt_metadata_str_addW(d,x,y,z,a)
 #define __itt_metadata_str_addW_ptr 0
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #define __itt_metadata_str_add(d,x,y,z,a)
@@ -2422,7 +2422,7 @@ ITT_STUBV(ITTAPI, void, metadata_str_add, (const __itt_domain *domain, __itt_id
 * @param[in] scope The scope of the instance to which the metadata is to be added
 * @param[in] id The identifier of the instance to which the metadata is to be added, or __itt_null to add to the current task
 * @param[in] key The name of the metadata
 * @param[in] type The type of the metadata
 * @param[in] count The number of elements of the given type. If count == 0, no metadata will be added.
@@ -2455,7 +2455,7 @@ ITT_STUBV(ITTAPI, void, metadata_add_with_scope, (const __itt_domain *domain, __
 
 * @param[in] key The name of the metadata
 * @param[in] data The metadata itself
-* @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated
+* @param[in] length The number of characters in the string, or -1 if the length is unknown but the string is null-terminated
 */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
 void ITTAPI __itt_metadata_str_add_with_scopeA(const __itt_domain *domain, __itt_scope scope, __itt_string_handle *key, const char *data, size_t length);
@@ -2491,9 +2491,9 @@ ITT_STUBV(ITTAPI, void, metadata_str_add_with_scope, (const __itt_domain *domain
 #endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #else /* INTEL_NO_ITTNOTIFY_API */
 #if ITT_PLATFORM==ITT_PLATFORM_WIN
-#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a)
+#define __itt_metadata_str_add_with_scopeA(d,x,y,z,a)
 #define __itt_metadata_str_add_with_scopeA_ptr 0
-#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a)
+#define __itt_metadata_str_add_with_scopeW(d,x,y,z,a)
 #define __itt_metadata_str_add_with_scopeW_ptr 0
 #else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 #define __itt_metadata_str_add_with_scope(d,x,y,z,a)
@@ -3079,9 +3079,9 @@ ITT_STUB(LIBITTAPI, int, event_end, (__itt_event event))
 
 /**
 * @enum __itt_av_data_type
-* @brief Defines types of arrays data (for C/C++ intrinsic types)
+* @brief Defines types of arrays data (for C/C++ intrinsic types)
 */
-typedef enum
+typedef enum
 {
 __itt_e_first = 0,
 __itt_e_char = 0, /* 1-byte integer */
@@ -3101,8 +3101,8 @@ typedef enum
 * @brief Save an array data to a file.
 * Output format is defined by the file extension. The csv and bmp formats are supported (bmp - for 2-dimensional array only).
 * @param[in] data - pointer to the array data
-* @param[in] rank - the rank of the array
-* @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions.
+* @param[in] rank - the rank of the array
+* @param[in] dimensions - pointer to an array of integers, which specifies the array dimensions.
 * The size of dimensions must be equal to the rank
 * @param[in] type - the type of the array, specified as one of the __itt_av_data_type values (for intrinsic types)
 * @param[in] filePath - the file path; the output format is defined by the file extension
@@ -74,7 +74,7 @@
 # if ITT_PLATFORM==ITT_PLATFORM_WIN
 # define CDECL __cdecl
 # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-# if defined _M_IX86 || defined __i386__
+# if defined _M_IX86 || defined __i386__
 # define CDECL __attribute__ ((cdecl))
 # else /* _M_IX86 || __i386__ */
 # define CDECL /* actual only on x86 platform */
@@ -87,7 +87,7 @@
 # define STDCALL __stdcall
 # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 # if defined _M_IX86 || defined __i386__
-# define STDCALL __attribute__ ((stdcall))
+# define STDCALL __attribute__ ((stdcall))
 # else /* _M_IX86 || __i386__ */
 # define STDCALL /* supported only on x86 platform */
 # endif /* _M_IX86 || __i386__ */
@@ -23,7 +23,7 @@
 #include <stdarg.h>
 #include <string.h>
 
-#define INTEL_NO_MACRO_BODY
+#define INTEL_NO_MACRO_BODY
 #define INTEL_ITTNOTIFY_API_PRIVATE
 #include "ittnotify.h"
 #include "legacy/ittnotify.h"
@@ -79,7 +79,7 @@
 # if ITT_PLATFORM==ITT_PLATFORM_WIN
 # define CDECL __cdecl
 # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
-# if defined _M_IX86 || defined __i386__
+# if defined _M_IX86 || defined __i386__
 # define CDECL __attribute__ ((cdecl))
 # else /* _M_IX86 || __i386__ */
 # define CDECL /* actual only on x86 platform */
@@ -92,7 +92,7 @@
 # define STDCALL __stdcall
 # else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
 # if defined _M_IX86 || defined __i386__
-# define STDCALL __attribute__ ((stdcall))
+# define STDCALL __attribute__ ((stdcall))
 # else /* _M_IX86 || __i386__ */
 # define STDCALL /* supported only on x86 platform */
 # endif /* _M_IX86 || __i386__ */
@@ -1436,7 +1436,7 @@ KMP_LABEL(kmp_1_exit):
 
 DEBUG_INFO __kmp_bsr32
 
 
 // -----------------------------------------------------------------------
 #endif /* KMP_ARCH_X86_64 */
 
@@ -1006,10 +1006,10 @@ __kmp_create_worker( int gtid, kmp_info_t *th, size_t stack_size )
 __kmp_msg(kmp_ms_fatal, KMP_MSG( CantSetWorkerState ), KMP_ERR( status ), __kmp_msg_null);
 }; // if
 
-/* Set stack size for this thread now.
+/* Set stack size for this thread now.
 * The multiple of 2 is there because on some machines, requesting an unusual stacksize
 * causes the thread to have an offset before the dummy alloca() takes place to create the
-* offset. Since we want the user to have a sufficient stacksize AND support a stack offset, we
+* offset. Since we want the user to have a sufficient stacksize AND support a stack offset, we
 * alloca() twice the offset so that the upcoming alloca() does not eliminate any premade
 * offset, and also gives the user the stack space they requested for all threads */
 stack_size += gtid * __kmp_stkoffset * 2;
@@ -2582,11 +2582,11 @@ __kmp_get_load_balance( int max )
 int
 __kmp_invoke_microtask( microtask_t pkfn,
 int gtid, int tid,
-int argc, void *p_argv[]
+int argc, void *p_argv[]
 #if OMPT_SUPPORT
 , void **exit_frame_ptr
 #endif
-)
+)
 {
 #if OMPT_SUPPORT
 *exit_frame_ptr = __builtin_frame_address(0);