*/
if (!evsel->own_cpus || evlist->has_user_cpus) {
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->cpus);
- } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->cpus)) {
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
+ } else if (!evsel->system_wide && perf_cpu_map__empty(evlist->user_requested_cpus)) {
perf_cpu_map__put(evsel->cpus);
- evsel->cpus = perf_cpu_map__get(evlist->cpus);
+ evsel->cpus = perf_cpu_map__get(evlist->user_requested_cpus);
} else if (evsel->cpus != evsel->own_cpus) {
perf_cpu_map__put(evsel->cpus);
evsel->cpus = perf_cpu_map__get(evsel->own_cpus);
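The precedence being preserved by this rename is easier to read flattened out. A sketch, as a hypothetical helper using the field names visible in this hunk (assuming has_user_cpus flags a user-supplied -C/--cpu style list):

	/* Hypothetical helper, not patch code: which map an evsel ends up with. */
	static struct perf_cpu_map *choose_cpus(struct perf_evlist *evlist,
						struct perf_evsel *evsel)
	{
		/* A user-supplied cpu list overrides any PMU-provided map. */
		if (!evsel->own_cpus || evlist->has_user_cpus)
			return evlist->user_requested_cpus;
		/* Not system-wide and no real cpus requested: per-thread default. */
		if (!evsel->system_wide &&
		    perf_cpu_map__empty(evlist->user_requested_cpus))
			return evlist->user_requested_cpus;
		/* Otherwise the evsel keeps its own PMU-specific map. */
		return evsel->own_cpus;
	}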
void perf_evlist__exit(struct perf_evlist *evlist)
{
- perf_cpu_map__put(evlist->cpus);
+ perf_cpu_map__put(evlist->user_requested_cpus);
perf_cpu_map__put(evlist->all_cpus);
perf_thread_map__put(evlist->threads);
- evlist->cpus = NULL;
+ evlist->user_requested_cpus = NULL;
evlist->all_cpus = NULL;
evlist->threads = NULL;
fdarray__exit(&evlist->pollfd);
* original reference count of 1. If that is not the case it is up to
* the caller to increase the reference count.
*/
- if (cpus != evlist->cpus) {
- perf_cpu_map__put(evlist->cpus);
- evlist->cpus = perf_cpu_map__get(cpus);
+ if (cpus != evlist->user_requested_cpus) {
+ perf_cpu_map__put(evlist->user_requested_cpus);
+ evlist->user_requested_cpus = perf_cpu_map__get(cpus);
}
if (threads != evlist->threads) {
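The ownership rule in the comment leans on libperf's reference counting; a minimal sketch (the cpu list is illustrative, and both calls tolerate NULL):

	struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");	/* refcount 1 */
	struct perf_cpu_map *ref  = perf_cpu_map__get(cpus);	/* refcount 2 */

	perf_cpu_map__put(ref);					/* refcount 1 */
	perf_cpu_map__put(cpus);				/* freed */

A caller that wants to keep using a map after handing it to perf_evlist__set_maps() therefore takes its own reference first, as the comment says.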
int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
- int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
int nr_threads = perf_thread_map__nr(evlist->threads);
int nfds = 0;
struct perf_evsel *evsel;
int idx, struct perf_mmap_param *mp, int cpu_idx,
int thread, int *_output, int *_output_overwrite)
{
- struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->cpus, cpu_idx);
+ struct perf_cpu evlist_cpu = perf_cpu_map__cpu(evlist->user_requested_cpus, cpu_idx);
struct perf_evsel *evsel;
int revent;
struct perf_mmap_param *mp)
{
int nr_threads = perf_thread_map__nr(evlist->threads);
- int nr_cpus = perf_cpu_map__nr(evlist->cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->user_requested_cpus);
int cpu, thread;
for (cpu = 0; cpu < nr_cpus; cpu++) {
{
int nr_mmaps;
- nr_mmaps = perf_cpu_map__nr(evlist->cpus);
- if (perf_cpu_map__empty(evlist->cpus))
+ nr_mmaps = perf_cpu_map__nr(evlist->user_requested_cpus);
+ if (perf_cpu_map__empty(evlist->user_requested_cpus))
nr_mmaps = perf_thread_map__nr(evlist->threads);
return nr_mmaps;
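One subtlety, assumed from libperf internals rather than shown in this patch: perf_cpu_map__empty() is true for the dummy map holding only CPU -1 (per-thread profiling), not for a zero-length map, which is why the mmap count falls back to the thread count:

	/* Hypothetical restatement of the fallback above. */
	static int count_mmaps(struct perf_evlist *evlist)
	{
		/* Dummy (-1) cpu map: per-thread mmaps, one per thread. */
		if (perf_cpu_map__empty(evlist->user_requested_cpus))
			return perf_thread_map__nr(evlist->threads);
		/* Otherwise one mmap per requested CPU. */
		return perf_cpu_map__nr(evlist->user_requested_cpus);
	}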
struct perf_mmap_param *mp)
{
struct perf_evsel *evsel;
- const struct perf_cpu_map *cpus = evlist->cpus;
+ const struct perf_cpu_map *cpus = evlist->user_requested_cpus;
const struct perf_thread_map *threads = evlist->threads;
if (!ops || !ops->get || !ops->mmap)
int nr_entries;
int nr_groups;
bool has_user_cpus;
- struct perf_cpu_map *cpus;
+ /**
+ * The cpus passed from the command line or all online CPUs by
+ * default.
+ */
+ struct perf_cpu_map *user_requested_cpus;
+ /** The union of all evsel cpu maps. */
struct perf_cpu_map *all_cpus;
struct perf_thread_map *threads;
int nr_mmaps;
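The two maps the new comments distinguish are built differently: user_requested_cpus comes straight from option parsing, while all_cpus is accumulated as the union of each evsel's map during propagation. A sketch of the former (the cpu list is illustrative):

	struct perf_cpu_map *requested = perf_cpu_map__new("0-3");	/* e.g. -C 0-3 */
	struct perf_cpu_map *dflt      = perf_cpu_map__new(NULL);	/* default: all online CPUs */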
struct evsel *evsel, u32 option)
{
int i, err = -EINVAL;
- struct perf_cpu_map *event_cpus = evsel->evlist->core.cpus;
+ struct perf_cpu_map *event_cpus = evsel->evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* Set option of each CPU we have */
container_of(itr, struct cs_etm_recording, itr);
struct perf_pmu *cs_etm_pmu = ptr->cs_etm_pmu;
struct evsel *evsel, *cs_etm_evsel = NULL;
- struct perf_cpu_map *cpus = evlist->core.cpus;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
int err = 0;
{
int i;
int etmv3 = 0, etmv4 = 0, ete = 0;
- struct perf_cpu_map *event_cpus = evlist->core.cpus;
+ struct perf_cpu_map *event_cpus = evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
/* cpu map is not empty, we have specific CPUs to work with */
u32 offset;
u64 nr_cpu, type;
struct perf_cpu_map *cpu_map;
- struct perf_cpu_map *event_cpus = session->evlist->core.cpus;
+ struct perf_cpu_map *event_cpus = session->evlist->core.user_requested_cpus;
struct perf_cpu_map *online_cpus = perf_cpu_map__new(NULL);
struct cs_etm_recording *ptr =
container_of(itr, struct cs_etm_recording, itr);
container_of(itr, struct arm_spe_recording, itr);
struct perf_pmu *arm_spe_pmu = sper->arm_spe_pmu;
struct evsel *evsel, *arm_spe_evsel = NULL;
- struct perf_cpu_map *cpus = evlist->core.cpus;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
struct evsel *tracking_evsel;
int err;
container_of(itr, struct intel_bts_recording, itr);
struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
struct evsel *evsel, *intel_bts_evsel = NULL;
- const struct perf_cpu_map *cpus = evlist->core.cpus;
+ const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
if (opts->auxtrace_sample_mode) {
ui__warning("Intel Processor Trace: TSC not available\n");
}
- per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.cpus);
+ per_cpu_mmaps = !perf_cpu_map__empty(session->evlist->core.user_requested_cpus);
auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
bool have_timing_info, need_immediate = false;
struct evsel *evsel, *intel_pt_evsel = NULL;
- const struct perf_cpu_map *cpus = evlist->core.cpus;
+ const struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
bool privileged = perf_event_paranoid_check(-1);
u64 tsc_bit;
int err;
init_stats(&time_stats);
- printf(" Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.cpus));
+ printf(" Number of cpus:\t%d\n", perf_cpu_map__nr(evlist->core.user_requested_cpus));
printf(" Number of threads:\t%d\n", evlist->core.threads->nr);
printf(" Number of events:\t%d (%d fds)\n",
evlist->core.nr_entries, evlist__count_evsel_fds(evlist));
static int set_tracing_cpu(struct perf_ftrace *ftrace)
{
- struct perf_cpu_map *cpumap = ftrace->evlist->core.cpus;
+ struct perf_cpu_map *cpumap = ftrace->evlist->core.user_requested_cpus;
if (!target__has_cpu(&ftrace->target))
return 0;
int m, tm, nr_mmaps = evlist->core.nr_mmaps;
struct mmap *mmap = evlist->mmap;
struct mmap *overwrite_mmap = evlist->overwrite_mmap;
- struct perf_cpu_map *cpus = evlist->core.cpus;
+ struct perf_cpu_map *cpus = evlist->core.user_requested_cpus;
thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
thread_data->mask->maps.nbits);
return err;
}
- err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.cpus,
+ err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.user_requested_cpus,
process_synthesized_event, NULL);
if (err < 0) {
pr_err("Couldn't synthesize cpu map.\n");
static int record__init_thread_masks(struct record *rec)
{
int ret = 0;
- struct perf_cpu_map *cpus = rec->evlist->core.cpus;
+ struct perf_cpu_map *cpus = rec->evlist->core.user_requested_cpus;
if (!record__threads_enabled(rec))
return record__init_thread_default_masks(rec, cpus);
if (group)
evlist__set_leader(evsel_list);
- if (!cpu_map__is_dummy(evsel_list->core.cpus)) {
+ if (!cpu_map__is_dummy(evsel_list->core.user_requested_cpus)) {
if (affinity__setup(&saved_affinity) < 0)
return -1;
affinity = &saved_affinity;
aggr_cpu_id_get_t get_id = aggr_mode__get_aggr(stat_config.aggr_mode);
if (get_id) {
- stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus,
+ stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus,
get_id, /*data=*/NULL);
if (!stat_config.aggr_map) {
pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
* taking the highest cpu number to be the size of
* the aggregation translate cpumap.
*/
- if (evsel_list->core.cpus)
- nr = perf_cpu_map__max(evsel_list->core.cpus).cpu;
+ if (evsel_list->core.user_requested_cpus)
+ nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
else
nr = 0;
stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
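The sizing comment amounts to indexing an array directly by CPU number; a small sketch (the cpu list is illustrative):

	struct perf_cpu_map *map = perf_cpu_map__new("0,2,7");

	/* perf_cpu_map__max() returns the largest CPU in the map (7 here),
	 * so direct indexing by CPU number needs max + 1 slots: 0..7. */
	int slots = perf_cpu_map__max(map).cpu + 1;	/* 8 */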
if (!get_id)
return 0;
- stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.cpus, get_id, env);
+ stat_config.aggr_map = cpu_aggr_map__new(evsel_list->core.user_requested_cpus, get_id, env);
if (!stat_config.aggr_map) {
pr_err("cannot build %s map", aggr_mode__string[stat_config.aggr_mode]);
return -1;
evlist__for_each_entry(evlist, counter) {
try_again:
- if (evsel__open(counter, top->evlist->core.cpus,
+ if (evsel__open(counter, top->evlist->core.user_requested_cpus,
top->evlist->core.threads) < 0) {
/*
mp->idx = idx;
if (per_cpu) {
- mp->cpu = perf_cpu_map__cpu(evlist->core.cpus, idx);
+ mp->cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, idx);
if (evlist->core.threads)
mp->tid = perf_thread_map__pid(evlist->core.threads, 0);
else
/* don't need to set cpu filter for system-wide mode */
if (ftrace->target.cpu_list) {
- ncpus = perf_cpu_map__nr(ftrace->evlist->core.cpus);
+ ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
}
fd = bpf_map__fd(skel->maps.cpu_filter);
for (i = 0; i < ncpus; i++) {
- cpu = perf_cpu_map__cpu(ftrace->evlist->core.cpus, i).cpu;
+ cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
}
}
bool has_imm = false;
// See explanation in evlist__close()
- if (!cpu_map__is_dummy(evlist->core.cpus)) {
+ if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
if (affinity__setup(&saved_affinity) < 0)
return;
affinity = &saved_affinity;
struct affinity saved_affinity, *affinity = NULL;
// See explanation in evlist__close()
- if (!cpu_map__is_dummy(evlist->core.cpus)) {
+ if (!cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
if (affinity__setup(&saved_affinity) < 0)
return;
affinity = &saved_affinity;
static int evlist__enable_event_thread(struct evlist *evlist, struct evsel *evsel, int thread)
{
int cpu;
- int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
+ int nr_cpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
if (!evsel->core.fd)
return -EINVAL;
int evlist__enable_event_idx(struct evlist *evlist, struct evsel *evsel, int idx)
{
- bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
+ bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.user_requested_cpus);
if (per_cpu_mmaps)
return evlist__enable_event_cpu(evlist, evsel, idx);
struct affinity affinity;
/*
- * With perf record core.cpus is usually NULL.
+ * With perf record core.user_requested_cpus is usually NULL.
* Use the old method to handle this for now.
*/
- if (!evlist->core.cpus || cpu_map__is_dummy(evlist->core.cpus)) {
+ if (!evlist->core.user_requested_cpus ||
+ cpu_map__is_dummy(evlist->core.user_requested_cpus)) {
evlist__for_each_entry_reverse(evlist, evsel)
evsel__close(evsel);
return;
* Default: one fd per CPU, all threads, aka systemwide
* as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
*/
- if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
+ if (evlist->core.threads == NULL && evlist->core.user_requested_cpus == NULL) {
err = evlist__create_syswide_maps(evlist);
if (err < 0)
goto out_err;
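The EINVAL claim in the comment is the documented perf_event_open(2) behaviour for the fully wildcarded form; a standalone sketch:

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_fully_wildcarded(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		/* pid == -1 && cpu == -1 fails with EINVAL, hence the
		 * syswide maps: one fd per CPU with pid == -1. */
		return syscall(__NR_perf_event_open, &attr, -1, -1, -1, 0);
	}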
if (opts->group)
evlist__set_leader(evlist);
- if (perf_cpu_map__cpu(evlist->core.cpus, 0).cpu < 0)
+ if (perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0).cpu < 0)
opts->no_inherit = true;
use_comm_exec = perf_can_comm_exec();
evsel = evlist__last(temp_evlist);
- if (!evlist || perf_cpu_map__empty(evlist->core.cpus)) {
+ if (!evlist || perf_cpu_map__empty(evlist->core.user_requested_cpus)) {
struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
if (cpus)
	cpu = perf_cpu_map__cpu(cpus, 0);
perf_cpu_map__put(cpus);
} else {
- cpu = perf_cpu_map__cpu(evlist->core.cpus, 0);
+ cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, 0);
}
while (1) {
}
evlist__for_each_entry(evlist, counter) {
- if (evsel__open(counter, evlist->core.cpus, evlist->core.threads) < 0)
+ if (evsel__open(counter, evlist->core.user_requested_cpus,
+ evlist->core.threads) < 0)
goto out_delete_evlist;
}
int all_idx;
struct perf_cpu cpu;
- perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.cpus) {
+ perf_cpu_map__for_each_cpu(cpu, all_idx, evlist->core.user_requested_cpus) {
struct evsel *counter;
bool first = true;
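perf_cpu_map__for_each_cpu() iterates (index, cpu) pairs as used above; a minimal sketch:

	#include <stdio.h>
	#include <perf/cpumap.h>

	static void dump_map(struct perf_cpu_map *map)
	{
		struct perf_cpu cpu;
		int idx;

		perf_cpu_map__for_each_cpu(cpu, idx, map)
			printf("map[%d] = CPU %d\n", idx, cpu.cpu);
	}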
return err;
}
- err = perf_event__synthesize_cpu_map(tool, evlist->core.cpus, process, NULL);
+ err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
if (err < 0) {
pr_err("Couldn't synthesize thread map.\n");
return err;
if (target->cpu_list)
ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
- perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "",
+ perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+ ? "s" : "",
target->cpu_list);
else {
if (target->tid)
ret += SNPRINTF(bf + ret, size - ret, ")");
else
ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
- perf_cpu_map__nr(top->evlist->core.cpus),
- perf_cpu_map__nr(top->evlist->core.cpus) > 1 ? "s" : "");
+ perf_cpu_map__nr(top->evlist->core.user_requested_cpus),
+ perf_cpu_map__nr(top->evlist->core.user_requested_cpus) > 1
+ ? "s" : "");
}
perf_top__reset_sample_counters(top);