"UNDEFINED", "READY"
};
+enum thread_spec {
+	THREAD_SPEC__UNDEFINED = 0,
+	THREAD_SPEC__CPU,
+};
+
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	record__mmap_cpu_mask_free(&mask->affinity);
}
+static int record__parse_threads(const struct option *opt, const char *str, int unset)
+{
+	struct record_opts *opts = opt->value;
+
+	if (unset || !str || !strlen(str))
+		opts->threads_spec = THREAD_SPEC__CPU;
+
+	return 0;
+}
+
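
Worth noting before moving on: at this stage the callback only handles the bare form. An absent or empty spec (bare --threads, str == NULL) falls back to THREAD_SPEC__CPU, while a non-empty spec string is silently ignored and threads_spec stays THREAD_SPEC__UNDEFINED; named specs arrive later in the series. A minimal standalone sketch of exactly that defaulting behavior (parse_threads_spec() below is illustrative, not a perf function):

#include <stdio.h>
#include <string.h>

enum thread_spec { THREAD_SPEC__UNDEFINED = 0, THREAD_SPEC__CPU };

/* Mirrors record__parse_threads(): an absent/empty spec selects CPU mode,
 * anything else is left undefined at this point in the series. */
static enum thread_spec parse_threads_spec(const char *str)
{
	if (!str || !strlen(str))
		return THREAD_SPEC__CPU;
	return THREAD_SPEC__UNDEFINED;
}

int main(void)
{
	printf("bare --threads -> %d\n", parse_threads_spec(NULL));  /* 1 == THREAD_SPEC__CPU */
	printf("--threads=cpu  -> %d\n", parse_threads_spec("cpu")); /* 0 == still undefined */
	return 0;
}
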
static int parse_output_max_size(const struct option *opt,
				 const char *str, int unset)
{
			&record.debuginfod.set, "debuginfod urls",
			"Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
			"system"),
+	OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
+			    "write collected trace data into several data files using parallel threads",
+			    record__parse_threads),
	OPT_END()
};
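
OPT_CALLBACK_OPTARG() here registers --threads with an optional argument: "spec" is only the placeholder printed in --help, &record.opts reaches the callback as opt->value, and the NULL slot is auxiliary callback data that record__parse_threads() doesn't use. Combined with the callback above, only the bare form enables anything yet (./my_workload is a placeholder):

	$ perf record --threads -- ./my_workload	# threads_spec = THREAD_SPEC__CPU
	$ perf record --threads=cpu -- ./my_workload	# spec accepted but not parsed until later patches
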
	return ret;
}
+static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
+{
+	int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
+
+	ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
+	if (ret)
+		return ret;
+
+	rec->nr_threads = nr_cpus;
+	pr_debug("nr_threads: %d\n", rec->nr_threads);
+
+	for (t = 0; t < rec->nr_threads; t++) {
+		set_bit(cpus->map[t].cpu, rec->thread_masks[t].maps.bits);
+		set_bit(cpus->map[t].cpu, rec->thread_masks[t].affinity.bits);
+		if (verbose) {
+			pr_debug("thread_masks[%d]: ", t);
+			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
+			pr_debug("thread_masks[%d]: ", t);
+			mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
+		}
+	}
+
+	return 0;
+}
+
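
The layout this builds is strictly 1:1: nr_threads equals the number of CPUs in the evlist's CPU map, and thread t gets exactly one bit set in both its maps and affinity bitmaps, namely cpus->map[t].cpu rather than t, which keeps sparse CPU maps correct. A toy standalone model of that layout (the fixed-width masks and hardcoded cpu_map[] are stand-ins for perf's bitmap and perf_cpu_map types):

#include <stdio.h>

struct toy_thread_mask {
	unsigned long maps;	/* CPUs whose ring buffers this thread reads */
	unsigned long affinity;	/* CPUs this thread may be scheduled on */
};

int main(void)
{
	/* A sparse map: pretend only CPUs 0, 2 and 5 are being recorded. */
	int cpu_map[] = { 0, 2, 5 };
	int nr_cpus = 3, t;
	struct toy_thread_mask thread_masks[64] = { { 0, 0 } };

	/* 1:1 layout: thread t covers exactly cpu_map[t], not CPU t. */
	for (t = 0; t < nr_cpus; t++) {
		thread_masks[t].maps     |= 1UL << cpu_map[t];
		thread_masks[t].affinity |= 1UL << cpu_map[t];
		printf("thread_masks[%d]: maps=%#lx affinity=%#lx\n",
		       t, thread_masks[t].maps, thread_masks[t].affinity);
	}
	return 0;
}
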
static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
{
	int ret;
{
	struct perf_cpu_map *cpus = rec->evlist->core.cpus;

-	return record__init_thread_default_masks(rec, cpus);
+	if (!record__threads_enabled(rec))
+		return record__init_thread_default_masks(rec, cpus);
+
+	return record__init_thread_cpu_masks(rec, cpus);
}
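
This rewritten dispatcher is the actual feature switch. record__threads_enabled() is not part of this hunk, but given that record__parse_threads() leaves threads_spec at THREAD_SPEC__UNDEFINED (0) unless --threads was passed, a plausible shape for it is simply:

static bool record__threads_enabled(struct record *rec)
{
	/* assumed helper, not shown in this diff: any non-zero spec enables threading */
	return rec->opts.threads_spec;
}

So a run without --threads keeps the single default mask and the existing serial path, while --threads fans out into one mask, and ultimately one data-file writer, per recorded CPU.
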
int cmd_record(int argc, const char **argv)