char const __kmp_version_lock[] = KMP_VERSION_PREFIX "lock type: run time selectable";
#endif /* KMP_DEBUG */
#define KMP_MIN( x, y ) ( (x) < (y) ? (x) : (y) )
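/* NOTE (illustrative sketch, not runtime code): as a function-style macro,
 * KMP_MIN evaluates the winning argument twice, so operands with side effects
 * are unsafe. Call sites should pass plain values only: */
#if 0
    int a = 3, b = 5;
    int m = KMP_MIN( a, b );    /* fine: m == 3, no side effects */
    int n = KMP_MIN( a++, b );  /* unsafe: a++ expands into both the comparison
                                   and the result branch, so a is incremented
                                   twice whenever the first branch is taken */
#endif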
/* ------------------------------------------------------------------------ */
#ifdef KMP_DEBUG
__kmp_reset_lock( &__kmp_stdio_lock );
#endif // KMP_DEBUG
}
BOOL WINAPI
new_nthreads = tl_nthreads;
}
//
// Check if the threads array is large enough, or needs expanding.
//
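/* NOTE (illustrative sketch only, hypothetical names): the expansion the
 * comment above refers to typically grows the array geometrically, so repeated
 * expansion stays amortized O(1) per added thread. Roughly: */
#if 0
#include <stdlib.h>
/* Grow *arr (current size *capacity) until it holds `required` entries. */
static int
example_ensure_capacity( void ***arr, int *capacity, int required )
{
    if ( required > *capacity ) {
        int new_cap = ( *capacity > 0 ) ? *capacity : 1;
        while ( new_cap < required )
            new_cap *= 2;                           /* double until big enough */
        void **tmp = (void **) realloc( *arr, new_cap * sizeof( void * ) );
        if ( tmp == NULL )
            return 0;                               /* let the caller report it */
        *arr = tmp;
        *capacity = new_cap;
    }
    return 1;
}
#endif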
__kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
#if USE_ITT_BUILD
if ( team->t.t_active_level == 1 // only report frames at level 1
# if OMP_40_ENABLED
     && !master_th->th.th_teams_microtask // not in teams construct
# endif /* OMP_40_ENABLED */
   ) {
__kmp_acquire_bootstrap_lock( &__kmp_forkjoin_lock );
// Release the extra threads we don't need any more.
for ( f = new_nth; f < hot_team->t.t_nproc; f++ ) {
    KMP_DEBUG_ASSERT( hot_team->t.t_threads[f] != NULL );
    __kmp_free_thread( hot_team->t.t_threads[f] );
    hot_team->t.t_threads[f] = NULL;
}
#endif
__kmp_release_bootstrap_lock( &__kmp_forkjoin_lock );
//
// Don't touch th_active_in_pool or th_active.
// The worker thread adjusts those flags as it sleeps/awakens.
//
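/* NOTE (illustrative sketch, not the runtime's code; this_thr stands for the
 * worker's own descriptor): the rule above means the sleeping thread itself is
 * the only writer of its "active" accounting, e.g. around its sleep loop, so
 * the allocating thread can never race with it: */
#if 0
    /* worker, about to block: */
    KMP_TEST_THEN_DEC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
    TCW_4( this_thr->th.th_active, FALSE );
    /* ... sleep ... */
    /* worker, just awakened: */
    TCW_4( this_thr->th.th_active, TRUE );
    KMP_TEST_THEN_INC32( (kmp_int32 *) &__kmp_thread_pool_active_nth );
#endif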
__kmp_thread_pool_nth--;
KA_TRACE( 20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
__kmp_create_worker( new_gtid, new_thr, __kmp_stksize );
KF_TRACE( 10, ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr ));
KA_TRACE( 20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(), new_gtid ));
KMP_MB();
return new_thr;
team->t.t_size_changed = 1;
#if KMP_NESTED_HOT_TEAMS
int avail_threads = hot_teams[level].hot_team_nth;
if( new_nproc < avail_threads )
    avail_threads = new_nproc;
// Reset pointer to parent team only for non-hot teams.
team->t.t_parent = NULL;
/* free the worker threads */
for ( f = 1; f < team->t.t_nproc; ++ f ) {
    KMP_DEBUG_ASSERT( team->t.t_threads[ f ] );
    __kmp_free_thread( team->t.t_threads[ f ] );
    team->t.t_threads[ f ] = NULL;
}
/* put the team back in the team pool */
/* TODO limit size of team pool, call reap_team if pool too large */
team->t.t_next_pool = (kmp_team_t*) __kmp_team_pool;
__kmp_team_pool = (volatile kmp_team_t*) team;
}
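/* NOTE (illustrative sketch; pool_size and MAX_POOL are hypothetical): the
 * team pool is a LIFO free list threaded through t_next_pool, as the two
 * stores above show. The bounded variant the TODO asks for would look
 * roughly like: */
#if 0
    if ( pool_size >= MAX_POOL ) {
        __kmp_reap_team( team );                 /* pool full: free it outright */
    } else {
        team->t.t_next_pool = (kmp_team_t *) __kmp_team_pool;
        __kmp_team_pool = (volatile kmp_team_t *) team;
        ++ pool_size;
    }
#endif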
this_th->th.th_task_state = 0;
/* put thread back on the free pool */
TCW_PTR(this_th->th.th_team, NULL);
TCW_PTR(this_th->th.th_root, NULL);
__kmp_release_64(&flag);
}; // if
// Terminate OS thread.
__kmp_reap_worker( thread );
TCW_4(__kmp_init_gtid, FALSE);
KMP_MB(); /* Flush all pending memory write invalidates. */
__kmp_cleanup();
#if OMPT_SUPPORT
ompt_fini();
#endif

#if KMP_OS_FREEBSD || KMP_OS_NETBSD
# include <pthread_np.h>
#endif
#include <dirent.h>
#include <ctype.h>
#include <fcntl.h>
return exit_val;
}
/* The monitor thread controls all of the threads in the complex */
static void*
__kmp_launch_monitor( void *thr )
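/* NOTE (simplified sketch; the real routine also handles blocktime, timeouts,
 * and exit paths): the monitor's core duty is to wake on a fixed period and
 * advance the runtime's coarse clock until shutdown is signaled, roughly: */
#if 0
    while ( !TCR_4( __kmp_global.g.g_done ) ) {
        /* sleep for one monitor period, e.g. via a timed condition wait */
        TCW_4( __kmp_global.g.g_time.dt.t_value,
               TCR_4( __kmp_global.g.g_time.dt.t_value ) + 1 );
    }
#endif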
KMP_DEBUG_ASSERT( TCR_4(__kmp_thread_pool_active_nth) >= 0 );
}
deactivated = TRUE;
}
#if USE_SUSPEND_TIMEOUT
}
#endif
status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
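/* NOTE: the status/KMP_CHECK_SYSFAIL pairing after each pthread call is this
 * file's error-handling idiom; conceptually (a sketch, not the macro's actual
 * definition) it is equivalent to:
 *     if ( status != 0 ) { report that "pthread_mutex_unlock" failed; abort; }
 */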
KF_TRACE( 5, ( "__kmp_resume_template: T#%d exiting, thread T#%d already awake: flag(%p): "
"%u => %u\n",
gtid, target_gtid, flag->get(), old_spin, *flag->get() ) );
status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
return;
}
#endif
status = pthread_cond_signal( &th->th.th_suspend_cv.c_cond );
KMP_CHECK_SYSFAIL( "pthread_cond_signal", status );
status = pthread_mutex_unlock( &th->th.th_suspend_mx.m_mutex );
KMP_CHECK_SYSFAIL( "pthread_mutex_unlock", status );
__kmp_win32_mutex_unlock( &cv->waiters_count_lock_ );
__kmp_win32_mutex_unlock( mx );
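/* NOTE (sketch; waiters_count_ and event_ are assumed field names, inferred
 * from waiters_count_lock_ above): this is the classic condition-variable-
 * over-Win32-events emulation. The signal side checks for waiters under the
 * inner lock before releasing the event, roughly: */
#if 0
static void
example_cond_signal( kmp_win32_cond_t *cv )
{
    int have_waiters;
    __kmp_win32_mutex_lock( &cv->waiters_count_lock_ );
    have_waiters = ( cv->waiters_count_ > 0 );
    __kmp_win32_mutex_unlock( &cv->waiters_count_lock_ );
    if ( have_waiters )
        SetEvent( cv->event_ );          /* wake a waiter */
}
#endif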
for (;;) {
int wait_done;
}
deactivated = TRUE;
__kmp_win32_cond_wait( &th->th.th_suspend_cv, &th->th.th_suspend_mx, 0, 0 );
}
else {
}
}
__kmp_win32_mutex_unlock( &th->th.th_suspend_mx );
KF_TRACE( 30, ("__kmp_suspend_template: T#%d exit\n", th_gtid ) );
KF_TRACE( 5, ( "__kmp_resume_template: T#%d about to wakeup T#%d, reset sleep bit for flag's loc(%p)\n",
gtid, target_gtid, flag->get() ) );
__kmp_win32_cond_signal( &th->th.th_suspend_cv );
__kmp_win32_mutex_unlock( &th->th.th_suspend_mx );
void
__kmp_read_system_time( double *delta )
{
if (delta != NULL) {
    BOOL status;
    LARGE_INTEGER now;
    status = QueryPerformanceCounter( &now );
    *delta = ((double)(((kmp_int64) now.QuadPart) - ((kmp_int64) __kmp_win32_time))) * (double) __kmp_win32_tick;
}
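/* NOTE (assumption about one-time setup, simplified): the conversion above
 * presumes __kmp_win32_time holds a QueryPerformanceCounter reading captured
 * at library startup and __kmp_win32_tick the reciprocal of
 * QueryPerformanceFrequency, so the product is elapsed seconds:
 *     QueryPerformanceFrequency( &freq );
 *     QueryPerformanceCounter( &now );
 *     __kmp_win32_time = now.QuadPart;
 *     __kmp_win32_tick = 1.0 / (double) freq.QuadPart;
 */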
return exit_val;
}
/* The monitor thread controls all of the threads in the complex */
void * __stdcall
__kmp_launch_monitor( void *arg )