EnumValue
Enum(profile_reproducibility) String(multithreaded) Value(PROFILE_REPRODUCIBILITY_MULTITHREADED)
-fprofile-reproducible
+fprofile-reproducible=
Common Joined RejectNegative Var(flag_profile_reproducible) Enum(profile_reproducibility) Init(PROFILE_REPRODUCIBILITY_SERIAL)
-fprofile-reproducible=[serial|parallel-runs|multithreaded] Control level of reproducibility of profile gathered by -fprofile-generate.
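For reference, the String()/Value() pairs above map onto the profile_reproducibility enumeration behind Var(flag_profile_reproducible). A minimal sketch of that enumeration, assuming the usual flag-types.h layout (shown for orientation, not copied from the patch):

enum profile_reproducibility
{
  PROFILE_REPRODUCIBILITY_SERIAL,
  PROFILE_REPRODUCIBILITY_PARALLEL_RUNS,
  PROFILE_REPRODUCIBILITY_MULTITHREADED
};

Init(PROFILE_REPRODUCIBILITY_SERIAL) in the option record above keeps serial as the default level.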
abs (counters[0]) is the number of executions
for i in 0 ... TOPN-1
- counters[2 * i + 1] is target
- abs (counters[2 * i + 2]) is corresponding hitrate counter.
+ counters[2 * i + 2] is target
+ counters[2 * i + 3] is corresponding hitrate counter.
Value of counters[0] is negative when the counter became
full during merging and some values are lost. */
*value = 0;
gcov_type read_all = abs_hwi (hist->hvalue.counters[0]);
+ gcov_type covered = 0;
+ for (unsigned i = 0; i < counters; ++i)
+ covered += hist->hvalue.counters[2 * i + 3];
gcov_type v = hist->hvalue.counters[2 * n + 2];
gcov_type c = hist->hvalue.counters[2 * n + 3];
if (hist->hvalue.counters[0] < 0
- && (flag_profile_reproducible == PROFILE_REPRODUCIBILITY_PARALLEL_RUNS
- || (flag_profile_reproducible
- == PROFILE_REPRODUCIBILITY_MULTITHREADED)))
- return false;
+ && flag_profile_reproducible == PROFILE_REPRODUCIBILITY_PARALLEL_RUNS)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Histogram value dropped in %s mode\n",
+ "-fprofile-reproducible=parallel-runs");
+ return false;
+ }
+ else if (covered != read_all
+ && flag_profile_reproducible == PROFILE_REPRODUCIBILITY_MULTITHREADED)
+ {
+ if (dump_file)
+ fprintf (dump_file, "Histogram value dropped in %s mode\n",
+ "-fprofile-reproducible=multithreaded");
+ return false;
+ }
/* Indirect calls can't be verified. */
if (stmt
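To illustrate the counter layout and the read_all/covered bookkeeping the hunk above relies on, here is a small standalone sketch; it is not part of the patch, dump_topn and the sample array are invented for the example, and gcov_type is assumed to be a 64-bit integer:

#include <stdio.h>
#include <stdint.h>

typedef int64_t gcov_type;

/* Decode one streamed TOPN counter block:
   counters[0] = total executions (negative when the counter overflowed
   during merging), counters[1] = number of tracked values, followed by
   (target, count) pairs at counters[2 * i + 2] and counters[2 * i + 3].  */
static void
dump_topn (const gcov_type *counters)
{
  gcov_type read_all = counters[0] < 0 ? -counters[0] : counters[0];
  gcov_type n = counters[1];
  gcov_type covered = 0;

  for (gcov_type i = 0; i < n; i++)
    {
      gcov_type target = counters[2 * i + 2];
      gcov_type count = counters[2 * i + 3];
      covered += count;
      printf ("target %lld: %lld hits\n", (long long) target,
              (long long) count);
    }

  printf ("read_all %lld, covered %lld%s\n", (long long) read_all,
          (long long) covered,
          counters[0] < 0 ? " (values were dropped while merging)" : "");
}

int
main (void)
{
  /* Hypothetical block: 100 executions, 2 tracked targets.  */
  gcov_type counters[] = { 100, 2, 0x1000, 60, 0x2000, 30 };
  dump_topn (counters);
  return 0;
}

With -fprofile-reproducible=multithreaded the hunk above drops the histogram value exactly when covered != read_all, i.e. when some executions did not land in any tracked target.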
-- the stored candidate on the most common value of the measured entity
-- counter
+
+ We use -TOTAL for the situation when merging dropped some values.
+ The information is used by the -fprofile-reproducible flag.
*/
void
gcov_type all = gcov_get_counter_ignore_scaling (-1);
gcov_type n = gcov_get_counter_ignore_scaling (-1);
- counters[GCOV_TOPN_MEM_COUNTERS * i] += all;
+ unsigned full = all < 0;
+ gcov_type *total = &counters[GCOV_TOPN_MEM_COUNTERS * i];
+ *total += full ? -all : all;
for (unsigned j = 0; j < n; j++)
{
gcov_type value = gcov_get_counter_target ();
gcov_type count = gcov_get_counter_ignore_scaling (-1);
// TODO: we should use atomic here
- gcov_topn_add_value (counters + GCOV_TOPN_MEM_COUNTERS * i, value,
- count, 0, 0);
+ full |= gcov_topn_add_value (counters + GCOV_TOPN_MEM_COUNTERS * i,
+ value, count, 0, 0);
}
+
+ if (full)
+ *total = -(*total);
}
}
#endif /* L_gcov_merge_topn */
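The negative-total convention used by the merging code above can be shown in isolation. A minimal sketch, assuming the in-memory total is non-negative on entry; merge_total is an invented name and is not part of libgcov:

#include <stdio.h>
#include <stdint.h>

typedef int64_t gcov_type;

/* Fold an incoming total ALL into *TOTAL using the encoding from the
   patch: a negative value means "some tracked values were dropped
   because the counter was full".  FULL_LOCALLY says whether this merge
   itself had to drop values.  */
static void
merge_total (gcov_type *total, gcov_type all, int full_locally)
{
  int full = all < 0 || full_locally;
  *total += all < 0 ? -all : all;
  if (full)
    *total = -*total;
}

int
main (void)
{
  gcov_type total = 50;

  merge_total (&total, 30, 0);            /* clean merge            */
  printf ("%lld\n", (long long) total);   /* prints 80              */

  merge_total (&total, -20, 0);           /* incoming counter full  */
  printf ("%lld\n", (long long) total);   /* prints -100            */
  return 0;
}

Consumers of the merged profile then recover the count with abs () and treat the sign as the "values were lost" bit, which is what the abs_hwi (hist->hvalue.counters[0]) call in the value-prof.c hunk does.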
/* Add key value pair VALUE:COUNT to a top N COUNTERS. When INCREMENT_TOTAL
is true, add COUNT to total of the TOP counter. If USE_ATOMIC is true,
- do it in atomic way. */
+ do it in an atomic way. Return true when the counter is full, otherwise
+ return false. */
-static inline void
+static inline unsigned
gcov_topn_add_value (gcov_type *counters, gcov_type value, gcov_type count,
int use_atomic, int increment_total)
{
if (current_node->value == value)
{
gcov_counter_add (&current_node->count, count, use_atomic);
- return;
+ return 0;
}
if (minimal_node == NULL
minimal_node->value = value;
minimal_node->count = count;
}
+
+ return 1;
}
else
{
struct gcov_kvp *new_node = allocate_gcov_kvp ();
if (new_node == NULL)
- return;
+ return 0;
new_node->value = value;
new_node->count = count;
if (success)
gcov_counter_add (&counters[1], 1, use_atomic);
}
+
+ return 0;
}
#endif /* !inhibit_libc */
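To make the new return-value contract of gcov_topn_add_value concrete, here is a standalone sketch with the same convention: 0 when the value was merged into an existing entry or a free slot, 1 when the table was already full and a new value had to compete for a slot. topn_add, topn_table and TOPN_MAX are invented for the illustration and use a flat array instead of libgcov's gcov_kvp linked list:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

typedef int64_t gcov_type;

#define TOPN_MAX 4  /* Stand-in for libgcov's real tracked-values limit.  */

struct topn_entry { gcov_type value, count; };
struct topn_table { unsigned n; struct topn_entry e[TOPN_MAX]; };

/* Add VALUE:COUNT.  Return 1 when the table was already full and VALUE
   was not yet tracked, 0 otherwise.  */
static unsigned
topn_add (struct topn_table *t, gcov_type value, gcov_type count)
{
  unsigned min = 0;

  for (unsigned i = 0; i < t->n; i++)
    {
      if (t->e[i].value == value)
        {
          t->e[i].count += count;
          return 0;
        }
      if (t->e[i].count < t->e[min].count)
        min = i;
    }

  if (t->n < TOPN_MAX)
    {
      t->e[t->n].value = value;
      t->e[t->n].count = count;
      t->n++;
      return 0;
    }

  /* Full: evict the smallest entry when the newcomer is bigger.  */
  if (t->e[min].count < count)
    {
      t->e[min].value = value;
      t->e[min].count = count;
    }
  return 1;
}

int
main (void)
{
  struct topn_table t;
  memset (&t, 0, sizeof (t));

  unsigned full = 0;
  for (gcov_type v = 1; v <= 6; v++)
    full |= topn_add (&t, v, v * 10);

  printf ("table became full: %u\n", full);  /* prints 1 */
  return 0;
}

In __gcov_merge_topn above, the return value is OR-ed into the local full flag so the merged total can be negated when any value had to compete for a slot.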