int __kmp_aux_get_affinity(void **mask) {
int gtid;
int retval;
+#if KMP_OS_WINDOWS || KMP_DEBUG
kmp_info_t *th;
-
+#endif
if (!KMP_AFFINITY_CAPABLE()) {
return -1;
}
gtid = __kmp_entry_gtid();
+#if KMP_OS_WINDOWS || KMP_DEBUG
th = __kmp_threads[gtid];
+#else
+ (void)gtid; // unused variable
+#endif
KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL);
KA_TRACE(
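The pattern above recurs throughout this patch: a variable consumed only by debug-time code (or only on one platform) is declared under the matching preprocessor guard, and whatever remains unused in the other configuration is cast to void. A minimal, self-contained sketch of the same idiom, with MY_DEBUG, current_id() and name_of() as hypothetical stand-ins for the runtime's KMP_DEBUG / __kmp_entry_gtid machinery:

    #include <assert.h>
    #include <stddef.h>

    int current_id(void) { return 7; } /* stands in for a call that must always run */
    const char *name_of(int id) { return id >= 0 ? "worker" : NULL; }

    void check_current(void) {
      int id = current_id();
    #if MY_DEBUG
      const char *name = name_of(id); /* 'id' is consumed only by debug code */
      assert(name != NULL);
    #else
      (void)id; /* otherwise cast to void so -Wunused-variable stays quiet */
    #endif
    }

In a release build the guarded declaration disappears and the (void) cast keeps the unused-variable warning quiet without changing behavior.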
In debug mode, fill the memory block with 0xEF before call to free(). */
void ___kmp_free(void *ptr KMP_SRC_LOC_DECL) {
kmp_mem_descr_t descr;
+#if KMP_DEBUG
kmp_uintptr_t addr_allocated; // Address returned by malloc().
kmp_uintptr_t addr_aligned; // Aligned address passed by caller.
-
+#endif
KE_TRACE(25,
("-> __kmp_free( %p ) called from %s:%d\n", ptr KMP_SRC_LOC_PARM));
KMP_ASSERT(ptr != NULL);
"ptr_aligned=%p, size_aligned=%d\n",
descr.ptr_allocated, (int)descr.size_allocated,
descr.ptr_aligned, (int)descr.size_aligned));
-
+#if KMP_DEBUG
addr_allocated = (kmp_uintptr_t)descr.ptr_allocated;
addr_aligned = (kmp_uintptr_t)descr.ptr_aligned;
-
KMP_DEBUG_ASSERT(addr_aligned % CACHE_LINE == 0);
KMP_DEBUG_ASSERT(descr.ptr_aligned == ptr);
KMP_DEBUG_ASSERT(addr_allocated + sizeof(kmp_mem_descr_t) <= addr_aligned);
KMP_DEBUG_ASSERT(descr.size_aligned < descr.size_allocated);
KMP_DEBUG_ASSERT(addr_aligned + descr.size_aligned <=
addr_allocated + descr.size_allocated);
-
-#ifdef KMP_DEBUG
memset(descr.ptr_allocated, 0xEF, descr.size_allocated);
// Fill memory block with 0xEF, it helps catch using freed memory.
#endif
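The ___kmp_free() hunk also shows the debug-only poison-on-free idiom: before the block is returned to the allocator, a debug build overwrites it with 0xEF so that later reads through a stale pointer produce recognizable garbage. A minimal sketch, assuming a hypothetical MY_DEBUG flag and debug_free() wrapper rather than the runtime's own allocator:

    #include <stdlib.h>
    #include <string.h>

    void debug_free(void *ptr, size_t size) {
    #if MY_DEBUG
      memset(ptr, 0xEF, size); /* poison the whole block before freeing it */
    #endif
      free(ptr);
    }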
kmp_info_t *this_thr = __kmp_threads[gtid];
kmp_team_t *team;
kmp_uint nproc;
- kmp_info_t *master_thread;
int tid;
#ifdef KMP_DEBUG
int team_id;
tid = __kmp_tid_from_gtid(gtid);
#ifdef KMP_DEBUG
team_id = team->t.t_id;
-#endif /* KMP_DEBUG */
- master_thread = this_thr->th.th_team_master;
-#ifdef KMP_DEBUG
+ kmp_info_t *master_thread = this_thr->th.th_team_master;
if (master_thread != team->t.t_threads[0]) {
__kmp_print_structure();
}
kmp_uint64 cur_time = __itt_get_timestamp();
ident_t *loc = team->t.t_ident;
kmp_info_t **other_threads = team->t.t_threads;
- int nproc = this_thr->th.th_team_nproc;
- int i;
switch (__kmp_forkjoin_frames_mode) {
case 1:
__kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0,
// Set arrive time to zero to be able to check it in
// __kmp_invoke_task(); the same is done inside the loop below
this_thr->th.th_bar_arrive_time = 0;
- for (i = 1; i < nproc; ++i) {
+ for (kmp_uint i = 1; i < nproc; ++i) {
delta += (cur_time - other_threads[i]->th.th_bar_arrive_time);
other_threads[i]->th.th_bar_arrive_time = 0;
}
}
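This hunk drops a local nproc that shadowed the function-scope counter (declared as kmp_uint earlier in the function) and moves the loop index into the for-init so it can take the same unsigned type as its bound. A tiny illustrative sketch (walk() is hypothetical):

    /* Declaring the index in the for-init keeps its scope tight and lets it
       share the unsigned type of the bound, avoiding both shadowing of an
       outer declaration and -Wsign-compare warnings. */
    void walk(unsigned nproc) {
      for (unsigned i = 1; i < nproc; ++i) {
        /* ... per-thread work, e.g. accumulate arrival times ... */
      }
    }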
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
- kmp_info_t *this_thr;
volatile kmp_int32 *head_id_p = &lck->lk.head_id;
volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
KA_TRACE(1000,
("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
KMP_DEBUG_ASSERT(gtid >= 0);
- this_thr = __kmp_thread_from_gtid(gtid);
+#if KMP_DEBUG || DEBUG_QUEUING_LOCKS
+ kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
+#endif
KMP_DEBUG_ASSERT(this_thr != NULL);
#ifdef DEBUG_QUEUING_LOCKS
TRACE_LOCK(gtid + 1, "rel ent");
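Note that this_thr now exists only when KMP_DEBUG or DEBUG_QUEUING_LOCKS is set, yet the KMP_DEBUG_ASSERT just below it stays unconditional. That still compiles in release builds because KMP_DEBUG_ASSERT (like KA_TRACE) expands to nothing when KMP_DEBUG is off, so the name is never referenced there. A simplified sketch of such a compile-out assert, with MY_DEBUG and my_debug_abort() as hypothetical stand-ins for the real macros in kmp_debug.h:

    #include <stddef.h>
    #if MY_DEBUG
    #include <stdio.h>
    #include <stdlib.h>
    static void my_debug_abort(const char *cond, const char *file, int line) {
      fprintf(stderr, "%s:%d: assertion '%s' failed\n", file, line, cond);
      abort();
    }
    #define MY_DEBUG_ASSERT(cond)                                              \
      ((cond) ? (void)0 : my_debug_abort(#cond, __FILE__, __LINE__))
    #else
    #define MY_DEBUG_ASSERT(cond) ((void)0) /* release: expands to a no-op */
    #endif

    void check(int *p) { MY_DEBUG_ASSERT(p != NULL); } /* legal in both modes */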
int use_hot_team = team == root->r.r_hot_team;
#if KMP_NESTED_HOT_TEAMS
int level;
- kmp_hot_team_ptr_t *hot_teams;
if (master) {
level = team->t.t_active_level - 1;
if (master->th.th_teams_microtask) { // in teams construct?
// team_of_workers before the parallel
} // team->t.t_level will be increased inside parallel
}
- hot_teams = master->th.th_hot_teams;
+#if KMP_DEBUG
+ kmp_hot_team_ptr_t *hot_teams = master->th.th_hot_teams;
+#endif
if (level < __kmp_hot_teams_max_level) {
KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
use_hot_team = 1;
kmp_info_t *thread = __kmp_threads[gtid];
kmp_task_team_t *task_team =
thread->th.th_task_team; // might be NULL for serial teams...
+#if KMP_DEBUG
kmp_int32 children = 0;
-
+#endif
KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
"task %p\n",
gtid, taskdata, resumed_task));
taskdata->td_flags.hidden_helper) {
__kmp_release_deps(gtid, taskdata);
// Predecrement simulated by "- 1" calculation
- children =
- KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
+#if KMP_DEBUG
+ children = -1 +
+#endif
+ KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
KMP_DEBUG_ASSERT(children >= 0);
if (taskdata->td_taskgroup)
KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
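The trick used here and in the following hunks is worth spelling out: the atomic decrement is real bookkeeping and must execute in every build, but its return value feeds only a debug assert. Splitting the assignment across the #if keeps the side effect unconditional, while the capture, and the "-1" that turns the returned old value into the post-decrement count, compiles away in release builds. A minimal sketch assuming std::atomic and a hypothetical MY_DEBUG flag in place of KMP_ATOMIC_DEC and KMP_DEBUG:

    #include <atomic>
    #include <cassert>

    void finish_child(std::atomic<int> &incomplete_children) {
    #if MY_DEBUG
      int remaining = -1 +
    #endif
          incomplete_children.fetch_sub(1); // the decrement runs in every build
    #if MY_DEBUG
      assert(remaining >= 0); // only a debug build looks at the result
    #endif
    }

The same shape appears in __kmp_steal_task, __kmp_execute_tasks_template and __kmp_second_top_half_finish_proxy below, with KMP_ATOMIC_INC where the counter grows instead.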
// We need to un-mark this victim as a finished victim. This must be done
// before releasing the lock, or else other threads (starting with the
// primary thread victim) might be prematurely released from the barrier!!!
- kmp_int32 count;
-
- count = KMP_ATOMIC_INC(unfinished_threads);
-
+#if KMP_DEBUG
+ kmp_int32 count =
+#endif
+ KMP_ATOMIC_INC(unfinished_threads);
KA_TRACE(
20,
("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
gtid, count + 1, task_team));
-
*thread_finished = FALSE;
}
TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
// done. This decrement might be to the spin location, and result in the
// termination condition being satisfied.
if (!*thread_finished) {
- kmp_int32 count;
-
- count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
+#if KMP_DEBUG
+ kmp_int32 count = -1 +
+#endif
+ KMP_ATOMIC_DEC(unfinished_threads);
KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
"unfinished_threads to %d task_team=%p\n",
gtid, count, task_team));
}
static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
+#if KMP_DEBUG
kmp_int32 children = 0;
-
// Predecrement simulated by "- 1" calculation
- children =
- KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
+ children = -1 +
+#endif
+ KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
KMP_DEBUG_ASSERT(children >= 0);
// Remove the imaginary children
"exit_val = %p\n",
th->th.th_info.ds.ds_gtid, exit_val));
}
+#else
+ (void)status; // unused variable
#endif /* KMP_DEBUG */
KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",