libomp_append(flags_local -Wno-switch LIBOMP_HAVE_WNO_SWITCH_FLAG)
libomp_append(flags_local -Wno-uninitialized LIBOMP_HAVE_WNO_UNINITIALIZED_FLAG)
libomp_append(flags_local -Wno-unused-but-set-variable LIBOMP_HAVE_WNO_UNUSED_BUT_SET_VARIABLE_FLAG)
+# libomp_append(flags_local -Wconversion LIBOMP_HAVE_WCONVERSION_FLAG)
libomp_append(flags_local /GS LIBOMP_HAVE_GS_FLAG)
libomp_append(flags_local /EHsc LIBOMP_HAVE_EHSC_FLAG)
libomp_append(flags_local /Oy- LIBOMP_HAVE_OY__FLAG)
check_cxx_compiler_flag(-Wno-switch LIBOMP_HAVE_WNO_SWITCH_FLAG)
check_cxx_compiler_flag(-Wno-uninitialized LIBOMP_HAVE_WNO_UNINITIALIZED_FLAG)
check_cxx_compiler_flag(-Wno-unused-but-set-variable LIBOMP_HAVE_WNO_UNUSED_BUT_SET_VARIABLE_FLAG)
+# check_cxx_compiler_flag(-Wconversion LIBOMP_HAVE_WCONVERSION_FLAG)
check_cxx_compiler_flag(-msse2 LIBOMP_HAVE_MSSE2_FLAG)
check_cxx_compiler_flag(-ftls-model=initial-exec LIBOMP_HAVE_FTLS_MODEL_FLAG)
libomp_check_architecture_flag(-mmic LIBOMP_HAVE_MMIC_FLAG)
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <limits>
+#include <type_traits>
/* Do not include <ctype.h>: it causes problems with /MD on Windows* OS due to
   a bad Microsoft library. Macros provided below replace these functions. */
#ifndef __ABSOFT_WIN
kmp_depnode_t
*td_depnode; // Pointer to graph node if this task has dependencies
kmp_task_team_t *td_task_team;
- kmp_int32 td_size_alloc; // The size of task structure, including shareds etc.
+ size_t td_size_alloc; // Size of task structure, including shareds etc.
#if defined(KMP_GOMP_COMPAT)
// 4 or 8 byte integers for the loop bounds in GOMP_taskloop
kmp_int32 td_size_loop_bounds;
extern void __kmp_aux_set_library(enum library_type arg);
extern void __kmp_aux_set_stacksize(size_t arg);
extern void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid);
-extern void __kmp_aux_set_defaults(char const *str, int len);
+extern void __kmp_aux_set_defaults(char const *str, size_t len);
/* Functions called from __kmp_aux_env_initialize() in kmp_settings.cpp */
void kmpc_set_blocktime(int arg);
operator FILE *() { return f; }
};
+template <typename SourceType, typename TargetType,
+ bool isSourceSmaller = (sizeof(SourceType) < sizeof(TargetType)),
+ bool isSourceEqual = (sizeof(SourceType) == sizeof(TargetType)),
+ bool isSourceSigned = std::is_signed<SourceType>::value,
+ bool isTargetSigned = std::is_signed<TargetType>::value>
+struct kmp_convert {};
+
+// Both types are signed
+// Source smaller
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, true, false, true, true> {
+ static TargetType to(SourceType src) { return (TargetType)src; }
+};
+// Source equal
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, true, true, true> {
+ static TargetType to(SourceType src) { return src; }
+};
+// Source bigger
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, false, true, true> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src <= static_cast<SourceType>(
+ (std::numeric_limits<TargetType>::max)()));
+ KMP_ASSERT(src >= static_cast<SourceType>(
+ (std::numeric_limits<TargetType>::min)()));
+ return (TargetType)src;
+ }
+};
+
+// Source signed, Target unsigned
+// Source smaller
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, true, false, true, false> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src >= 0);
+ return (TargetType)src;
+ }
+};
+// Source equal
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, true, true, false> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src >= 0);
+ return (TargetType)src;
+ }
+};
+// Source bigger
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, false, true, false> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src >= 0);
+ KMP_ASSERT(src <= static_cast<SourceType>(
+ (std::numeric_limits<TargetType>::max)()));
+ return (TargetType)src;
+ }
+};
+
+// Source unsigned, Target signed
+// Source smaller
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, true, false, false, true> {
+ static TargetType to(SourceType src) { return (TargetType)src; }
+};
+// Source equal
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, true, false, true> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src <= static_cast<SourceType>(
+ (std::numeric_limits<TargetType>::max)()));
+ return (TargetType)src;
+ }
+};
+// Source bigger
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, false, false, true> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src <= static_cast<SourceType>(
+ (std::numeric_limits<TargetType>::max)()));
+ return (TargetType)src;
+ }
+};
+
+// Source unsigned, Target unsigned
+// Source smaller
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, true, false, false, false> {
+ static TargetType to(SourceType src) { return (TargetType)src; }
+};
+// Source equal
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, true, false, false> {
+ static TargetType to(SourceType src) { return src; }
+};
+// Source bigger
+template <typename SourceType, typename TargetType>
+struct kmp_convert<SourceType, TargetType, false, false, false, false> {
+ static TargetType to(SourceType src) {
+ KMP_ASSERT(src <= static_cast<SourceType>(
+ (std::numeric_limits<TargetType>::max)()));
+ return (TargetType)src;
+ }
+};
+
+template <typename T1, typename T2>
+static inline void __kmp_type_convert(T1 src, T2 *dest) {
+ *dest = kmp_convert<T1, T2>::to(src);
+}
+
#endif /* KMP_H */
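// Editor's sketch (illustrative, not part of the patch; variable names are
// hypothetical): storing a wider signed value into a narrower unsigned field
// selects the "Source signed, Target unsigned, Source bigger" specialization
// above, which asserts representability before casting:
//
//   kmp_int64 wide = 200;
//   kmp_uint8 narrow;
//   __kmp_type_convert(wide, &narrow); // OK: 0 <= 200 <= 255
//   wide = -1;
//   __kmp_type_convert(wide, &narrow); // trips KMP_ASSERT(src >= 0)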
KMP_DEBUG_ASSERT(depth > 0);
thr_bar->depth = depth;
- thr_bar->base_leaf_kids = (kmp_uint8)machine_hierarchy.numPerLevel[0] - 1;
+ __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
+ &(thr_bar->base_leaf_kids));
thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}
}
// Range with three or more contiguous bits in the affinity mask
if (previous - start > 1) {
- KMP_SNPRINTF(scan, end - scan + 1, "%d-%d", static_cast<int>(start),
- static_cast<int>(previous));
+ KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
} else {
// Range with one or two contiguous bits in the affinity mask
- KMP_SNPRINTF(scan, end - scan + 1, "%d", static_cast<int>(start));
+ KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
KMP_ADVANCE_SCAN(scan);
if (previous - start > 0) {
- KMP_SNPRINTF(scan, end - scan + 1, ",%d", static_cast<int>(previous));
+ KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
}
}
KMP_ADVANCE_SCAN(scan);
}
// Range with three or more contiguous bits in the affinity mask
if (previous - start > 1) {
- __kmp_str_buf_print(buf, "%d-%d", static_cast<int>(start),
- static_cast<int>(previous));
+ __kmp_str_buf_print(buf, "%u-%u", start, previous);
} else {
// Range with one or two contiguous bits in the affinity mask
- __kmp_str_buf_print(buf, "%d", static_cast<int>(start));
+ __kmp_str_buf_print(buf, "%u", start);
if (previous - start > 0) {
- __kmp_str_buf_print(buf, ",%d", static_cast<int>(previous));
+ __kmp_str_buf_print(buf, ",%u", previous);
}
}
// Start over with new start point
int get_system_affinity(bool abort_on_error) override {
KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
"Illegal get affinity operation when not capable");
- int retval =
+ long retval =
hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
if (retval >= 0) {
return 0;
int set_system_affinity(bool abort_on_error) const override {
KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
"Illegal set affinity operation when not capable");
- int retval =
+ long retval =
hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
if (retval >= 0) {
return 0;
#endif
class KMPNativeAffinity : public KMPAffinity {
class Mask : public KMPAffinity::Mask {
- typedef unsigned char mask_t;
- static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
+ typedef unsigned long mask_t;
+ typedef decltype(__kmp_affin_mask_size) mask_size_type;
+ static const unsigned int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
+ static const mask_t ONE = 1;
+ mask_size_type get_num_mask_types() const {
+ return __kmp_affin_mask_size / sizeof(mask_t);
+ }
public:
mask_t *mask;
__kmp_free(mask);
}
void set(int i) override {
- mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
+ mask[i / BITS_PER_MASK_T] |= (ONE << (i % BITS_PER_MASK_T));
}
bool is_set(int i) const override {
- return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
+ return (mask[i / BITS_PER_MASK_T] & (ONE << (i % BITS_PER_MASK_T)));
}
void clear(int i) override {
- mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
+ mask[i / BITS_PER_MASK_T] &= ~(ONE << (i % BITS_PER_MASK_T));
}
void zero() override {
- for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
- mask[i] = 0;
+ mask_size_type e = get_num_mask_types();
+ for (mask_size_type i = 0; i < e; ++i)
+ mask[i] = (mask_t)0;
}
void copy(const KMPAffinity::Mask *src) override {
const Mask *convert = static_cast<const Mask *>(src);
- for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
+ mask_size_type e = get_num_mask_types();
+ for (mask_size_type i = 0; i < e; ++i)
mask[i] = convert->mask[i];
}
void bitwise_and(const KMPAffinity::Mask *rhs) override {
const Mask *convert = static_cast<const Mask *>(rhs);
- for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
+ mask_size_type e = get_num_mask_types();
+ for (mask_size_type i = 0; i < e; ++i)
mask[i] &= convert->mask[i];
}
void bitwise_or(const KMPAffinity::Mask *rhs) override {
const Mask *convert = static_cast<const Mask *>(rhs);
- for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
+ mask_size_type e = get_num_mask_types();
+ for (mask_size_type i = 0; i < e; ++i)
mask[i] |= convert->mask[i];
}
void bitwise_not() override {
- for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
+ mask_size_type e = get_num_mask_types();
+ for (mask_size_type i = 0; i < e; ++i)
mask[i] = ~(mask[i]);
}
int begin() const override {
++retval;
return retval;
}
- int end() const override { return __kmp_affin_mask_size * BITS_PER_MASK_T; }
+ int end() const override {
+ int e;
+ __kmp_type_convert(get_num_mask_types() * BITS_PER_MASK_T, &e);
+ return e;
+ }
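// Editor's note (illustrative): with mask_t widened to unsigned long, bit i
// of the mask lives in word i / BITS_PER_MASK_T at position
// i % BITS_PER_MASK_T, e.g. set(70) on LP64 does mask[1] |= (ONE << 6).
// The mask_t-typed ONE matters: a bare 1 is an int, and shifting an int by
// 32 or more positions (now possible with 64-bit words) is undefined.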
int next(int previous) const override {
int retval = previous + 1;
while (retval < end() && !is_set(retval))
KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
"Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
- int retval =
+ long retval =
syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
int r =
KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
"Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
- int retval =
+ long retval =
syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
int r =
skipPerLevel = &(numPerLevel[maxLevels]);
// Copy old elements from old arrays
- for (kmp_uint32 i = 0; i < old_maxLevels;
- ++i) { // init numPerLevel[*] to 1 item per level
+ for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
numPerLevel[i] = old_numPerLevel[i];
skipPerLevel[i] = old_skipPerLevel[i];
}
// Init new elements in arrays to 1
- for (kmp_uint32 i = old_maxLevels; i < maxLevels;
- ++i) { // init numPerLevel[*] to 1 item per level
+ for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
+ // init numPerLevel[*] to 1 item per level
numPerLevel[i] = 1;
skipPerLevel[i] = 1;
}
case omp_atk_pinned:
break;
case omp_atk_alignment:
- al->alignment = traits[i].value;
+ __kmp_type_convert(traits[i].value, &(al->alignment));
KMP_ASSERT(IS_POWER_OF_TWO(al->alignment));
break;
case omp_atk_pool_size:
void *___kmp_fast_allocate(kmp_info_t *this_thr, size_t size KMP_SRC_LOC_DECL) {
void *ptr;
- int num_lines;
- int idx;
+ size_t num_lines, idx;
int index;
void *alloc_ptr;
size_t alloc_size;
#define OP_UPDATE_CRITICAL(TYPE, OP, LCK_ID) \
__kmp_acquire_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid); \
- (*lhs) = (TYPE)((*lhs)OP(rhs)); \
+ (*lhs) = (TYPE)((*lhs)OP((TYPE)rhs)); \
__kmp_release_atomic_lock(&ATOMIC_LOCK##LCK_ID, gtid);
// ------------------------------------------------------------------------
{ \
TYPE old_value, new_value; \
old_value = *(TYPE volatile *)lhs; \
- new_value = (TYPE)(old_value OP rhs); \
+ new_value = (TYPE)(old_value OP((TYPE)rhs)); \
while (!KMP_COMPARE_AND_STORE_ACQ##BITS( \
(kmp_int##BITS *)lhs, *VOLATILE_CAST(kmp_int##BITS *) & old_value, \
*VOLATILE_CAST(kmp_int##BITS *) & new_value)) { \
KMP_DO_PAUSE; \
\
old_value = *(TYPE volatile *)lhs; \
- new_value = (TYPE)(old_value OP rhs); \
+ new_value = (TYPE)(old_value OP((TYPE)rhs)); \
} \
}
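// Editor's note on the two macros above: casting rhs to TYPE before applying
// OP makes the narrowing of the right-hand operand explicit, so the
// expression is evaluated on operands of the atomic's declared type instead
// of letting integer promotion widen it and leaving -Wconversion to flag the
// final truncating assignment.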
++d;
}
}
- thr_bar->offset = 7 - (tid - thr_bar->parent_tid - 1);
+ __kmp_type_convert(7 - (tid - thr_bar->parent_tid - 1), &(thr_bar->offset));
thr_bar->old_tid = tid;
thr_bar->wait_flag = KMP_BARRIER_NOT_WAITING;
thr_bar->team = team;
if (thr_bar->my_level == 0)
thr_bar->leaf_kids = 0;
if (thr_bar->leaf_kids && (kmp_uint32)tid + thr_bar->leaf_kids + 1 > nproc)
- thr_bar->leaf_kids = nproc - tid - 1;
+ __kmp_type_convert(nproc - tid - 1, &(thr_bar->leaf_kids));
thr_bar->leaf_state = 0;
for (int i = 0; i < thr_bar->leaf_kids; ++i)
((char *)&(thr_bar->leaf_state))[7 - i] = 1;
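// Editor's worked example (assumes little-endian and a 64-bit leaf_state):
// with leaf_kids = 3 the loop sets bytes 7, 6 and 5, so
// leaf_state == 0x0101010000000000, one flag byte per leaf child.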
__kmp_save_internal_controls(thread);
- set__dynamic(thread, flag ? TRUE : FALSE);
+ set__dynamic(thread, flag ? true : false);
}
void ompc_set_nested(int flag) {
th->th.th_team = team;
th->th.th_team_nproc = team->t.t_nproc;
th->th.th_task_team = team->t.t_task_team[task_state];
- th->th.th_task_state = task_state;
+ __kmp_type_convert(task_state, &(th->th.th_task_state));
}
/* 2.a.i. Reduce Block without a terminating barrier */
#endif
if (flags == NULL) {
// we are the first thread, allocate the array of flags
- size_t size = trace_count / 8 + 8; // in bytes, use single bit per iteration
+ size_t size =
+ (size_t)trace_count / 8 + 8; // in bytes, use single bit per iteration
flags = (kmp_uint32 *)__kmp_thread_calloc(th, size, 1);
KMP_MB();
sh_buf->doacross_flags = flags;
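// Editor's worked example: one flag bit per iteration, so trace_count = 100
// gives size = 100 / 8 + 8 = 20 bytes; the new (size_t) cast makes the
// conversion from the signed trace count explicit.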
void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
__kmp_assert_valid_gtid(gtid);
- kmp_int32 shft, num_dims, i;
+ kmp_int64 shft;
+ size_t num_dims, i;
kmp_uint32 flag;
kmp_int64 iter_number; // iteration number of "collapsed" loop nest
kmp_info_t *th = __kmp_threads[gtid];
// calculate sequential iteration number and check out-of-bounds condition
pr_buf = th->th.th_dispatch;
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
- num_dims = pr_buf->th_doacross_info[0];
+ num_dims = (size_t)pr_buf->th_doacross_info[0];
lo = pr_buf->th_doacross_info[2];
up = pr_buf->th_doacross_info[3];
st = pr_buf->th_doacross_info[4];
#endif
for (i = 1; i < num_dims; ++i) {
kmp_int64 iter, ln;
- kmp_int32 j = i * 4;
+ size_t j = i * 4;
ln = pr_buf->th_doacross_info[j + 1];
lo = pr_buf->th_doacross_info[j + 2];
up = pr_buf->th_doacross_info[j + 3];
#if OMPT_SUPPORT && OMPT_OPTIONAL
if (ompt_enabled.ompt_callback_dependences) {
ompt_callbacks.ompt_callback(ompt_callback_dependences)(
- &(OMPT_CUR_TASK_INFO(th)->task_data), deps, num_dims);
+ &(OMPT_CUR_TASK_INFO(th)->task_data), deps, (kmp_uint32)num_dims);
}
#endif
KA_TRACE(20,
void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
__kmp_assert_valid_gtid(gtid);
- kmp_int32 shft, num_dims, i;
+ kmp_int64 shft;
+ size_t num_dims, i;
kmp_uint32 flag;
kmp_int64 iter_number; // iteration number of "collapsed" loop nest
kmp_info_t *th = __kmp_threads[gtid];
// out-of-bounds checks)
pr_buf = th->th.th_dispatch;
KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
- num_dims = pr_buf->th_doacross_info[0];
+ num_dims = (size_t)pr_buf->th_doacross_info[0];
lo = pr_buf->th_doacross_info[2];
st = pr_buf->th_doacross_info[4];
#if OMPT_SUPPORT && OMPT_OPTIONAL
#endif
for (i = 1; i < num_dims; ++i) {
kmp_int64 iter, ln;
- kmp_int32 j = i * 4;
+ size_t j = i * 4;
ln = pr_buf->th_doacross_info[j + 1];
lo = pr_buf->th_doacross_info[j + 2];
st = pr_buf->th_doacross_info[j + 4];
#if OMPT_SUPPORT && OMPT_OPTIONAL
if (ompt_enabled.ompt_callback_dependences) {
ompt_callbacks.ompt_callback(ompt_callback_dependences)(
- &(OMPT_CUR_TASK_INFO(th)->task_data), deps, num_dims);
+ &(OMPT_CUR_TASK_INFO(th)->task_data), deps, (kmp_uint32)num_dims);
}
#endif
shft = iter_number % 32; // use 32-bit granularity
KA_TRACE(20, ("__kmpc_doacross_fini() exit: serialized team %p\n", team));
return; // nothing to do
}
- num_done = KMP_TEST_THEN_INC32((kmp_int32 *)pr_buf->th_doacross_info[1]) + 1;
+ num_done =
+ KMP_TEST_THEN_INC32((kmp_uintptr_t)(pr_buf->th_doacross_info[1])) + 1;
if (num_done == th->th.th_team_nproc) {
// we are the last thread, need to free shared resources
int idx = pr_buf->th_doacross_buf_idx - 1;
// when remaining iters become less than parm2 - switch to dynamic
pr->u.p.parm2 = guided_int_param * nproc * (chunk + 1);
*(double *)&pr->u.p.parm3 =
- guided_flt_param / nproc; // may occupy parm3 and parm4
+ guided_flt_param / (double)nproc; // may occupy parm3 and parm4
}
} else {
KD_TRACE(100, ("__kmp_dispatch_init_algorithm: T#%d falling-through to "
UT cross;
/* commonly used term: (2 nproc - 1)/(2 nproc) */
- x = (long double)1.0 - (long double)0.5 / nproc;
+ x = 1.0 - 0.5 / (double)nproc;
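// Editor's worked example: nproc = 4 gives x = 1.0 - 0.5 / 4.0 = 0.875,
// i.e. (2 * 4 - 1) / (2 * 4) = 7 / 8, matching the comment above.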
#ifdef KMP_DEBUG
{ // test natural alignment
typedef typename traits_t<T>::signed_t ST;
typedef typename traits_t<T>::floating_t DBL;
int status = 0;
- kmp_int32 last = 0;
+ bool last = false;
T start;
ST incr;
UT limit, trip, init;
}
if (!status) { // try to steal
kmp_info_t **other_threads = team->t.t_threads;
- int while_limit = pr->u.p.parm3;
- int while_index = 0;
+ T while_limit = pr->u.p.parm3;
+ T while_index = 0;
T id = pr->u.p.static_steal_counter; // loop id
int idx = (th->th.th_dispatch->th_disp_index - 1) %
__kmp_dispatch_num_buffers; // current loop index
if (!status) {
kmp_info_t **other_threads = team->t.t_threads;
- int while_limit = pr->u.p.parm3;
- int while_index = 0;
+ T while_limit = pr->u.p.parm3;
+ T while_index = 0;
T id = pr->u.p.static_steal_counter; // loop id
int idx = (th->th.th_dispatch->th_disp_index - 1) %
__kmp_dispatch_num_buffers; // current loop index
while ((!status) && (while_limit != ++while_index)) {
dispatch_private_info_template<T> *victim;
union_i4 vold, vnew;
- kmp_int32 remaining;
+ T remaining;
T victimIdx = pr->u.p.parm4;
T oldVictimIdx = victimIdx ? victimIdx - 1 : nproc - 1;
victim = reinterpret_cast<dispatch_private_info_template<T> *>(
break; // not enough chunks to steal, goto next victim
}
if (remaining > 3) {
- vnew.p.ub -= (remaining >> 2); // try to steal 1/4 of remaining
+ // try to steal 1/4 of remaining
+ vnew.p.ub -= remaining >> 2;
} else {
vnew.p.ub -= 1; // steal 1 chunk of 2 or 3 remaining
}
pr->u.p.count = 1;
*p_lb = pr->u.p.lb;
*p_ub = pr->u.p.ub;
- last = pr->u.p.parm1;
+ last = (pr->u.p.parm1 != 0);
if (p_st != NULL)
*p_st = pr->u.p.st;
} else { /* no iterations to do */
if ((T)remaining > chunkspec) {
limit = init + chunkspec - 1;
} else {
- last = 1; // the last chunk
+ last = true; // the last chunk
limit = init + remaining - 1;
} // if
} // if
break;
} // if
- limit = init +
- (UT)(remaining * *(double *)&pr->u.p.parm3); // divide by K*nproc
+ limit = init + (UT)((double)remaining *
+ *(double *)&pr->u.p.parm3); // divide by K*nproc
if (compare_and_swap<ST>(RCAST(volatile ST *, &sh->u.s.iteration),
(ST)init, (ST)limit)) {
// CAS was successful, chunk obtained
if ((T)remaining > chunk) {
limit = init + chunk - 1;
} else {
- last = 1; // the last chunk
+ last = true; // the last chunk
limit = init + remaining - 1;
} // if
} // if
break;
} // if
// divide by K*nproc
- UT span = remaining * (*(double *)&pr->u.p.parm3);
+ UT span;
+ __kmp_type_convert((double)remaining * (*(double *)&pr->u.p.parm3),
+ &span);
UT rem = span % chunk;
if (rem) // adjust so that span%chunk == 0
span += chunk - rem;
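// Editor's worked example (illustrative numbers): remaining = 1000 with
// *(double *)&pr->u.p.parm3 == 0.125 and chunk = 7 gives span = 125,
// rem = 125 % 7 = 6, so span is rounded up to 126, a multiple of chunk.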
"next_index:%llu curr_wait:%llu next_wait:%llu\n",
__kmp_get_gtid(), current_index, next_index, current_wait_value,
next_wait_value));
- char v = (current_wait_value ? 0x1 : 0x0);
+ char v = (current_wait_value ? '\1' : '\0');
(RCAST(volatile char *, &(bdata->val[current_index])))[id] = v;
__kmp_wait<kmp_uint64>(&(bdata->val[current_index]), current_wait_value,
__kmp_eq<kmp_uint64> USE_ITT_BUILD_ARG(NULL));
// When no iterations are found (status == 0) and this is not the last
// layer, attempt to go up the hierarchy for more iterations
if (status == 0 && !last_layer) {
+ kmp_int32 hid;
+ __kmp_type_convert(hier_id, &hid);
status = next_recurse(loc, gtid, parent, &contains_last, &my_lb, &my_ub,
- &my_st, hier_id, hier_level + 1);
+ &my_st, hid, hier_level + 1);
KD_TRACE(
10,
("kmp_hier_t.next_recurse(): T#%d (%d) hier_next() returned %d\n",
bool done = false;
while (!done) {
done = true;
+ kmp_int32 uid;
+ __kmp_type_convert(unit_id, &uid);
status = next_recurse(loc, gtid, parent, &contains_last, p_lb, p_ub,
- p_st, unit_id, 0);
+ p_st, uid, 0);
if (status == 1) {
__kmp_dispatch_init_algorithm(loc, gtid, pr, pr->schedule,
parent->get_next_lb(tdata->index),
bool done = false;
while (!done) {
done = true;
+ kmp_int32 uid;
+ __kmp_type_convert(unit_id, &uid);
status = next_recurse(loc, gtid, parent, &contains_last, p_lb, p_ub,
- p_st, unit_id, 0);
+ p_st, uid, 0);
if (status == 1) {
sh = parent->get_curr_sh(tdata->index);
__kmp_dispatch_init_algorithm(loc, gtid, pr, pr->schedule,
___kmp_env_blk_parse_unix(kmp_env_blk_t *block, // M: Env block to fill.
char **env // I: Unix environment to parse.
) {
-
char *bulk = NULL;
kmp_env_var_t *vars = NULL;
int count = 0;
- int size = 0; // Size of bulk.
+ size_t size = 0; // Size of bulk.
// Count number of variables and length of required bulk.
{
- count = 0;
- size = 0;
while (env[count] != NULL) {
size += KMP_STRLEN(env[count]) + 1;
++count;
char *var; // Pointer to beginning of var.
char *name; // Pointer to name of variable.
char *value; // Pointer to value.
- int len; // Length of variable.
+ size_t len; // Length of variable.
int i;
var = bulk;
for (i = 0; i < count; ++i) {
int FTN_STDCALL FTN_GET_STACKSIZE(void) {
#ifdef KMP_STUB
- return __kmps_get_stacksize();
+ return (int)__kmps_get_stacksize();
#else
if (!__kmp_init_serial) {
__kmp_serial_initialize();
} else {
#endif
if (!__kmp_init_parallel ||
- (gtid = (kmp_intptr_t)(
- pthread_getspecific(__kmp_gtid_threadprivate_key))) == 0) {
+ (gtid = (int)((kmp_intptr_t)(
+ pthread_getspecific(__kmp_gtid_threadprivate_key)))) == 0) {
return 0;
}
--gtid;
thread = __kmp_entry_thread();
// !!! What if foreign thread calls it?
__kmp_save_internal_controls(thread);
- set__dynamic(thread, KMP_DEREF flag ? TRUE : FALSE);
+ set__dynamic(thread, KMP_DEREF flag ? true : false);
#endif
}
kmp_depend_info_t dep_list[ndeps];
for (kmp_int32 i = 0; i < ndeps; i++)
dep_list[i] = gomp_depends.get_kmp_depend(i);
- __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps, dep_list, 0, NULL);
+ kmp_int32 ndeps_cnv;
+ __kmp_type_convert(ndeps, &ndeps_cnv);
+ __kmpc_omp_task_with_deps(&loc, gtid, task, ndeps_cnv, dep_list, 0, NULL);
} else {
__kmpc_omp_task(&loc, gtid, task);
}
kmp_info_t *th = __kmp_threads[gtid];
MKLOC(loc, "GOMP_doacross_post");
kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
- kmp_int64 *vec =
- (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
+ kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc(
+ th, (size_t)(sizeof(kmp_int64) * num_dims));
for (kmp_int64 i = 0; i < num_dims; ++i) {
vec[i] = (kmp_int64)count[i];
}
kmp_info_t *th = __kmp_threads[gtid];
MKLOC(loc, "GOMP_doacross_wait");
kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0];
- kmp_int64 *vec =
- (kmp_int64 *)__kmp_thread_malloc(th, sizeof(kmp_int64) * num_dims);
+ kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc(
+ th, (size_t)(sizeof(kmp_int64) * num_dims));
vec[0] = (kmp_int64)first;
for (kmp_int64 i = 1; i < num_dims; ++i) {
T item = va_arg(args, T);
kmp_msg_type_t type;
int num;
char *str;
- int len;
+ size_t len;
}; // struct kmp_message
typedef struct kmp_msg kmp_msg_t;
// Stronger condition: make sure we have room at least for two
// different ids (for each barrier type).
object = reinterpret_cast<void *>(
- kmp_uintptr_t(team) +
- counter % (sizeof(kmp_team_t) / bs_last_barrier) * bs_last_barrier +
+ (kmp_uintptr_t)(team) +
+ (kmp_uintptr_t)counter % (sizeof(kmp_team_t) / bs_last_barrier) *
+ bs_last_barrier +
bt);
KMP_ITT_DEBUG_LOCK();
KMP_ITT_DEBUG_PRINT("[bar obj] type=%d, counter=%lld, object=%p\n", bt,
("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
lck, gtid, poll_val));
- kmp_int32 rc;
+ long rc;
if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
NULL, 0)) != 0) {
KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
- "failed (rc=%d errno=%d)\n",
+ "failed (rc=%ld errno=%d)\n",
lck, gtid, poll_val, rc, errno));
continue;
}
// __kmp_get_random: Get a random number using a linear congruential method.
unsigned short __kmp_get_random(kmp_info_t *thread) {
unsigned x = thread->th.th_x;
- unsigned short r = x >> 16;
+ unsigned short r = (unsigned short)(x >> 16);
thread->th.th_x = x * thread->th.th_a + 1;
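// Editor's note: th_x advances as a linear congruential generator,
// x' = a * x + 1 (mod 2^32 via unsigned wraparound), and the result is the
// high 16 bits of x, e.g. x = 0x12345678 yields r = 0x1234; the new cast
// documents that truncation, which -Wconversion would otherwise flag.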
team->t.t_threads[f]->th.th_task_state =
team->t.t_threads[0]->th.th_task_state_memo_stack[level];
} else { // set th_task_state for new threads in non-nested hot team
- int old_state =
+ kmp_uint8 old_state =
team->t.t_threads[0]->th.th_task_state; // copy master's state
for (f = old_nproc; f < team->t.t_nproc; ++f)
team->t.t_threads[f]->th.th_task_state = old_state;
/* ------------------------------------------------------------------------ */
void __kmp_internal_end_dest(void *specific_gtid) {
-#if KMP_COMPILER_ICC
-#pragma warning(push)
-#pragma warning(disable : 810) // conversion from "void *" to "int" may lose
-// significant bits
-#endif
// Make sure no significant bits are lost
- int gtid = (kmp_intptr_t)specific_gtid - 1;
-#if KMP_COMPILER_ICC
-#pragma warning(pop)
-#endif
+ int gtid;
+ __kmp_type_convert((kmp_intptr_t)specific_gtid - 1, &gtid);
KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid));
/* NOTE: the gtid is stored as gtid+1 in the thread-local-storage
static void __kmp_do_serial_initialize(void) {
int i, gtid;
- int size;
+ size_t size;
KA_TRACE(10, ("__kmp_do_serial_initialize: enter\n"));
const char *long_name = __kmp_affinity_format_table[i].long_name;
char field_format = __kmp_affinity_format_table[i].field_format;
if (parse_long_name) {
- int length = KMP_STRLEN(long_name);
+ size_t length = KMP_STRLEN(long_name);
if (strncmp(*ptr, long_name, length) == 0) {
found_valid_name = true;
(*ptr) += length; // skip the long name
#if KMP_USE_MONITOR
int bt_intervals;
#endif
- int bt_set;
+ kmp_int8 bt_set;
__kmp_save_internal_controls(thread);
#endif
}
-void __kmp_aux_set_defaults(char const *str, int len) {
+void __kmp_aux_set_defaults(char const *str, size_t len) {
if (!__kmp_init_serial) {
__kmp_serial_initialize();
}
KMP_INFORM(Using_uint64_Value, name, buf.str);
__kmp_str_buf_free(&buf);
}
- *out = uint;
+ __kmp_type_convert(uint, out);
} // __kmp_stg_parse_int
#if KMP_DEBUG_ADAPTIVE_LOCKS
}
} // __kmp_stg_print_int
-#if USE_ITT_BUILD && USE_ITT_NOTIFY
static void __kmp_stg_print_uint64(kmp_str_buf_t *buffer, char const *name,
kmp_uint64 value) {
if (__kmp_env_format) {
__kmp_str_buf_print(buffer, " %s=%" KMP_UINT64_SPEC "\n", name, value);
}
} // __kmp_stg_print_uint64
-#endif
static void __kmp_stg_print_str(kmp_str_buf_t *buffer, char const *name,
char const *value) {
msg = KMP_I18N_STR(ValueTooLarge);
KMP_WARNING(ParseSizeIntWarn, name, value, msg);
} else { // valid setting
- __kmp_dflt_max_active_levels = tmp_dflt;
+ __kmp_type_convert(tmp_dflt, &(__kmp_dflt_max_active_levels));
__kmp_dflt_max_active_levels_set = true;
}
}
static void __kmp_stg_print_taskloop_min_tasks(kmp_str_buf_t *buffer,
char const *name, void *data) {
- __kmp_stg_print_int(buffer, name, __kmp_taskloop_min_tasks);
+ __kmp_stg_print_uint64(buffer, name, __kmp_taskloop_min_tasks);
} // __kmp_stg_print_taskloop_min_tasks
// -----------------------------------------------------------------------------
*nextEnv = next;
{
- int len = next - env;
+ ptrdiff_t len = next - env;
char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char));
KMP_MEMCPY_S(retlist, (len + 1) * sizeof(char), env, len * sizeof(char));
retlist[len] = '\0';
}
{
- int len = scan - env;
+ ptrdiff_t len = scan - env;
char *retlist = (char *)__kmp_allocate((len + 1) * sizeof(char));
KMP_MEMCPY_S(retlist, (len + 1) * sizeof(char), env, len * sizeof(char));
retlist[len] = '\0';
if (len == 0 && *pos == ':') {
__kmp_hws_abs_flag = 1; // if the first symbol is ":", skip it
} else {
- input[len] = toupper(*pos);
+ input[len] = (char)(toupper(*pos));
if (input[len] == 'X')
input[len] = ','; // unify delimiters of levels
if (input[len] == 'O' && strchr(digits, *(pos + 1)))
// output interface
static kmp_stats_output_module *__kmp_stats_global_output = NULL;
-double logHistogram::binMax[] = {
- 1.e1l, 1.e2l, 1.e3l, 1.e4l, 1.e5l, 1.e6l, 1.e7l, 1.e8l,
- 1.e9l, 1.e10l, 1.e11l, 1.e12l, 1.e13l, 1.e14l, 1.e15l, 1.e16l,
- 1.e17l, 1.e18l, 1.e19l, 1.e20l, 1.e21l, 1.e22l, 1.e23l, 1.e24l,
- 1.e25l, 1.e26l, 1.e27l, 1.e28l, 1.e29l, 1.e30l};
+double logHistogram::binMax[] = {1.e1l, 1.e2l, 1.e3l, 1.e4l, 1.e5l, 1.e6l,
+ 1.e7l, 1.e8l, 1.e9l, 1.e10l, 1.e11l, 1.e12l,
+ 1.e13l, 1.e14l, 1.e15l, 1.e16l, 1.e17l, 1.e18l,
+ 1.e19l, 1.e20l, 1.e21l, 1.e22l, 1.e23l, 1.e24l,
+ 1.e25l, 1.e26l, 1.e27l, 1.e28l, 1.e29l, 1.e30l,
+ // Always have infinity be the last value
+ std::numeric_limits<double>::infinity()};
/* ************* statistic member functions ************* */
}
std::string statistic::format(char unit, bool total) const {
- std::string result = formatSI(sampleCount, 9, ' ');
+ std::string result = formatSI((double)sampleCount, 9, ' ');
if (sampleCount == 0) {
result = result + std::string(", ") + formatSI(0.0, 9, unit);
// According to a micro-architect this is likely to be faster than a binary
// search, since it will only have one branch mis-predict
- for (int b = 0; b < numBins; b++)
+ for (int b = 0; b < numBins - 1; b++)
if (binMax[b] > v)
return b;
- fprintf(stderr,
- "Trying to add a sample that is too large into a histogram\n");
- KMP_ASSERT(0);
- return -1;
+ return numBins - 1;
}
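// Editor's note (illustrative): with binMax now terminated by +infinity, a
// huge sample such as 1e31, which previously hit the fprintf/KMP_ASSERT
// path, falls through the loop over the finite bins and is counted in the
// final catch-all bin numBins - 1.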
void logHistogram::addSample(double sample) {
result << "\n";
}
for (int i = minBin(); i <= maxBin(); i++) {
- result << "10**" << i << "<=v<10**" << (i + 1) << ", "
- << formatSI(count(i), 9, ' ') << ", " << formatSI(total(i), 9, unit);
+ result << "10**" << i << "<=v<";
+ if (i + 1 == numBins - 1)
+ result << "infinity, ";
+ else
+ result << "10**" << (i + 1) << ", ";
+ result << formatSI(count(i), 9, ' ') << ", " << formatSI(total(i), 9, unit);
if (i != maxBin())
result << "\n";
}
int kmp_stats_output_module::printPerThreadEventsFlag = 0;
static char const *lastName(char *name) {
- int l = strlen(name);
+ int l = (int)strlen(name);
for (int i = l - 1; i >= 0; --i) {
if (name[i] == '.')
name[i] = '_';
for (int c = 0; c < COUNTER_LAST; c++) {
counter const *stat = &theCounters[c];
fprintf(statsOut, "%-25s, %s\n", counter::name(counter_e(c)),
- formatSI(stat->getValue(), 9, ' ').c_str());
+ formatSI((double)stat->getValue(), 9, ' ').c_str());
}
}
for (counter_e c = counter_e(0); c < COUNTER_LAST; c = counter_e(c + 1)) {
if (counter::masterOnly(c) && t != 0)
continue;
- allCounters[c].addSample((*it)->getCounter(c)->getValue());
+ allCounters[c].addSample((double)(*it)->getCounter(c)->getValue());
}
}
* @ingroup STATS_GATHERING
*/
#define KMP_COUNT_VALUE(name, value) \
- __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample(value)
+ __kmp_stats_thread_ptr->getTimer(TIMER_##name)->addSample((double)(value))
/*!
* \brief Increments specified counter (name).
KMP_STR_BUF_INVARIANT(buffer);
} // __kmp_str_buf_clear
-void __kmp_str_buf_reserve(kmp_str_buf_t *buffer, int size) {
+void __kmp_str_buf_reserve(kmp_str_buf_t *buffer, size_t size) {
KMP_STR_BUF_INVARIANT(buffer);
KMP_DEBUG_ASSERT(size >= 0);
KMP_STR_BUF_INVARIANT(buffer);
} // __kmp_str_buf_free
-void __kmp_str_buf_cat(kmp_str_buf_t *buffer, char const *str, int len) {
+void __kmp_str_buf_cat(kmp_str_buf_t *buffer, char const *str, size_t len) {
KMP_STR_BUF_INVARIANT(buffer);
KMP_DEBUG_ASSERT(str != NULL);
KMP_DEBUG_ASSERT(len >= 0);
+
__kmp_str_buf_reserve(buffer, buffer->used + len + 1);
KMP_MEMCPY(buffer->str + buffer->used, str, len);
buffer->str[buffer->used + len] = 0;
- buffer->used += len;
+ __kmp_type_convert(buffer->used + len, &(buffer->used));
KMP_STR_BUF_INVARIANT(buffer);
} // __kmp_str_buf_cat
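// Editor's usage sketch (hypothetical call site): with len now size_t,
// strlen-style lengths no longer need a cast:
//
//   kmp_str_buf_t buf;
//   __kmp_str_buf_init(&buf);
//   __kmp_str_buf_cat(&buf, name, KMP_STRLEN(name));
//   __kmp_str_buf_free(&buf);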
slash = strrchr(fname->dir, '/');
if (KMP_OS_WINDOWS &&
slash == NULL) { // On Windows* OS, if slash not found,
- char first = TOLOWER(fname->dir[0]); // look for drive.
+ char first = (char)TOLOWER(fname->dir[0]); // look for drive.
if ('a' <= first && first <= 'z' && fname->dir[1] == ':') {
slash = &fname->dir[1];
}
}
void __kmp_str_buf_clear(kmp_str_buf_t *buffer);
-void __kmp_str_buf_reserve(kmp_str_buf_t *buffer, int size);
+void __kmp_str_buf_reserve(kmp_str_buf_t *buffer, size_t size);
void __kmp_str_buf_detach(kmp_str_buf_t *buffer);
void __kmp_str_buf_free(kmp_str_buf_t *buffer);
-void __kmp_str_buf_cat(kmp_str_buf_t *buffer, char const *str, int len);
+void __kmp_str_buf_cat(kmp_str_buf_t *buffer, char const *str, size_t len);
void __kmp_str_buf_catbuf(kmp_str_buf_t *dest, const kmp_str_buf_t *src);
int __kmp_str_buf_vprint(kmp_str_buf_t *buffer, char const *format,
va_list args);
/* kmp API functions */
void kmp_set_stacksize(omp_int_t arg) {
i;
- __kmps_set_stacksize(arg);
+ __kmps_set_stacksize((size_t)arg);
}
void kmp_set_stacksize_s(size_t arg) {
i;
static size_t __kmps_stacksize = KMP_DEFAULT_STKSIZE;
-void __kmps_set_stacksize(int arg) {
+void __kmps_set_stacksize(size_t arg) {
i;
__kmps_stacksize = arg;
} // __kmps_set_stacksize
-int __kmps_get_stacksize(void) {
+size_t __kmps_get_stacksize(void) {
i;
return __kmps_stacksize;
} // __kmps_get_stacksize
int __kmps_get_library(void);
void __kmps_set_nested(int arg);
int __kmps_get_nested(void);
-void __kmps_set_stacksize(int arg);
-int __kmps_get_stacksize();
+void __kmps_set_stacksize(size_t arg);
+size_t __kmps_get_stacksize();
#ifndef KMP_SCHED_TYPE_DEFINED
#define KMP_SCHED_TYPE_DEFINED
size_t sizes[] = { 997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029 };
const size_t MAX_GEN = 8;
-static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
+static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
// TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
// m_num_sets );
return ((addr >> 6) ^ (addr >> 2)) % hsize;
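// Editor's worked example: addr = 0x1040 with hsize = 997:
//   (0x1040 >> 6) ^ (0x1040 >> 2) == 0x41 ^ 0x410 == 0x451 == 1105,
//   1105 % 997 == 108, so the entry lands in bucket 108. Returning size_t
// keeps the bucket index consistent with the size_t hash sizes above.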
return current_dephash;
size_t new_size = sizes[gen];
- kmp_int32 size_to_allocate =
+ size_t size_to_allocate =
new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
#if USE_FAST_MEMORY
next = entry->next_in_bucket;
// Compute the new hash using the new size, and insert the entry in
// the new bucket.
- kmp_int32 new_bucket = __kmp_dephash_hash(entry->addr, h->size);
+ size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
entry->next_in_bucket = h->buckets[new_bucket];
if (entry->next_in_bucket) {
h->nconflicts++;
else
h_size = KMP_DEPHASH_OTHER_SIZE;
- kmp_int32 size =
- h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
+ size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
#if USE_FAST_MEMORY
h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
*hash = __kmp_dephash_extend(thread, h);
h = *hash;
}
- kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);
+ size_t bucket = __kmp_dephash_hash(addr, h->size);
kmp_dephash_entry_t *entry;
for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
} // non-NULL reduce_orig means new interface used
}
-template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, int j);
+template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, size_t j);
template <>
void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item,
- int offset) {
+ size_t offset) {
((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset);
}
template <>
void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
- int offset) {
+ size_t offset) {
((void (*)(void *, void *))item.reduce_init)(
(char *)(item.reduce_priv) + offset, item.reduce_orig);
}
__kmp_assert_valid_gtid(gtid);
kmp_info_t *thread = __kmp_threads[gtid];
kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
- kmp_int32 nth = thread->th.th_team_nproc;
+ kmp_uint32 nth = thread->th.th_team_nproc;
kmp_taskred_data_t *arr;
// check input data just in case
arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
if (arr[i].reduce_init != NULL) {
// initialize all thread-specific items
- for (int j = 0; j < nth; ++j) {
+ for (size_t j = 0; j < nth; ++j) {
__kmp_call_init<T>(arr[i], j * size);
}
}
// Toggle the th_task_state field, to switch which task_team this thread
// refers to
- this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
+ this_thr->th.th_task_state = (kmp_uint8)(1 - this_thr->th.th_task_state);
+
// It is now safe to propagate the task team pointer from the team struct to
// the current thread.
TCW_PTR(this_thr->th.th_task_team,
// Initialize the data area from the template.
static void __kmp_copy_common_data(void *pc_addr, struct private_data *d) {
char *addr = (char *)pc_addr;
- int i, offset;
- for (offset = 0; d != 0; d = d->next) {
- for (i = d->more; i > 0; --i) {
+ for (size_t offset = 0; d != 0; d = d->next) {
+ for (int i = d->more; i > 0; --i) {
if (d->data == 0)
memset(&addr[offset], '\0', d->size);
else
} else { // Wrong unit.
return result;
}
- result = value;
+ result = (kmp_uint64)value; // rounds down
}
return result;
public:
typedef P flag_t;
kmp_flag_native(volatile P *p, flag_type ft)
- : loc(p), t({(unsigned int)ft, 0U}) {}
+ : loc(p), t({(short unsigned int)ft, 0U}) {}
volatile P *get() { return loc; }
void *get_void_p() { return RCAST(void *, CCAST(P *, loc)); }
void set(volatile P *new_loc) { loc = new_loc; }
public:
typedef P flag_t;
kmp_flag(std::atomic<P> *p, flag_type ft)
- : loc(p), t({(unsigned int)ft, 0U}) {}
+ : loc(p), t({(short unsigned int)ft, 0U}) {}
/*!
* @result the pointer to the actual flag
*/
__kmp_lock_suspend_mx(th);
volatile void *spin = flag->get();
- void *cacheline = (void *)(kmp_uint64(spin) & ~(CACHE_LINE - 1));
+ void *cacheline = (void *)(kmp_uintptr_t(spin) & ~(CACHE_LINE - 1));
if (!flag->done_check()) {
// Mark thread as no longer active
return 0;
*addr = ret_addr;
- *size = ret_size;
+ *size = (size_t)ret_size;
return 1;
}
};
// Convert timespec to nanoseconds.
-#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)
+#define TS2NS(timespec) \
+ (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
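// Editor's worked example (LP64): tv_sec = 2, tv_nsec = 500000000 yields
// 2 * 1000000000 + 500000000 = 2500000000 ns; the (long int)1e9 factor
// keeps the multiply in integer arithmetic instead of routing it through
// double as the bare 1e9 literal did.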
static struct kmp_sys_timer __kmp_sys_timer_data;
// If Linux* OS:
// If the syscall fails or returns a suggestion for the size,
// then we don't have to search for an appropriate size.
- int gCode;
- int sCode;
+ long gCode;
+ long sCode;
unsigned char *buf;
buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
KA_TRACE(30, ("__kmp_affinity_determine_capable: "
- "initial getaffinity call returned %d errno = %d\n",
+ "initial getaffinity call returned %ld errno = %d\n",
gCode, errno));
// if ((gCode < 0) && (errno == ENOSYS))
// buffer with the same size fails with errno set to EFAULT.
sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
KA_TRACE(30, ("__kmp_affinity_determine_capable: "
- "setaffinity for mask size %d returned %d errno = %d\n",
+ "setaffinity for mask size %ld returned %ld errno = %d\n",
gCode, sCode, errno));
if (sCode < 0) {
if (errno == ENOSYS) {
for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
KA_TRACE(30, ("__kmp_affinity_determine_capable: "
- "getaffinity for mask size %d returned %d errno = %d\n",
+ "getaffinity for mask size %ld returned %ld errno = %d\n",
size, gCode, errno));
if (gCode < 0) {
sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
KA_TRACE(30, ("__kmp_affinity_determine_capable: "
- "setaffinity for mask size %d returned %d errno = %d\n",
+ "setaffinity for mask size %ld returned %ld errno = %d\n",
gCode, sCode, errno));
if (sCode < 0) {
if (errno == ENOSYS) { // Linux* OS only
}
}
#elif KMP_OS_FREEBSD
- int gCode;
+ long gCode;
unsigned char *buf;
buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT, reinterpret_cast<cpuset_t *>(buf));
int __kmp_futex_determine_capable() {
int loc = 0;
- int rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
+ long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
int retval = (rc == 0) || (errno != ENOSYS);
KA_TRACE(10,
/*t =*/times(&buffer);
- return (buffer.tms_utime + buffer.tms_cutime) / (double)CLOCKS_PER_SEC;
+ return (double)(buffer.tms_utime + buffer.tms_cutime) /
+ (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
status = gettimeofday(&tval, NULL);
KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
TIMEVAL_TO_TIMESPEC(&tval, &stop);
- t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
+ t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
*delta = (t_ns * 1e-9);
}
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD || \
KMP_OS_OPENBSD || KMP_OS_HURD
- r = sysconf(_SC_NPROCESSORS_ONLN);
+ __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
#elif KMP_OS_DARWIN
if (sysconf(_SC_THREADS)) {
/* Query the maximum number of threads */
- __kmp_sys_max_nth = sysconf(_SC_THREAD_THREADS_MAX);
+ __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
if (__kmp_sys_max_nth == -1) {
/* Unlimited threads for NPTL */
__kmp_sys_max_nth = INT_MAX;
nsec2 = __kmp_now_nsec();
diff = nsec2 - nsec;
if (diff > 0) {
- kmp_uint64 tpms = (kmp_uint64)(1e6 * (delay + (now - goal)) / diff);
+ kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
if (tpms > 0)
__kmp_ticks_per_msec = tpms;
}
// getloadavg() may return the number of samples less than requested that is
// less than 3.
if (__kmp_load_balance_interval < 180 && (res >= 1)) {
- ret_avg = averages[0]; // 1 min
+ ret_avg = (int)averages[0]; // 1 min
} else if ((__kmp_load_balance_interval >= 180 &&
__kmp_load_balance_interval < 600) &&
(res >= 2)) {
- ret_avg = averages[1]; // 5 min
+ ret_avg = (int)averages[1]; // 5 min
} else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
- ret_avg = averages[2]; // 15 min
+ ret_avg = (int)averages[2]; // 15 min
} else { // Error occurred
return -1;
}
-- ln
*/
char buffer[65];
- int len;
+ ssize_t len;
len = read(stat_file, buffer, sizeof(buffer) - 1);
if (len >= 0) {
buffer[len] = 0;