// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;
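/*
 * etm_perf_up is set once perf_pmu_register() has succeeded; until then
 * etm_perf_symlink() returns -EPROBE_DEFER so callers can retry later.
 */
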
/**
 * struct etm_event_data - Coresight specifics associated to an event
 * @work:	Handle to free allocated memory outside IRQ context.
 * @mask:	Hold the CPU(s) this event was set for.
 * @snk_config:	The sink configuration.
 * @path:	An array of paths, one per CPU.
 */
struct etm_event_data {
        struct work_struct work;
        cpumask_t mask;
        void *snk_config;
        struct list_head * __percpu *path;
};

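/*
 * Per-CPU state: the perf AUX output handle in use on each CPU and the
 * tracer (source) servicing it, registered via etm_perf_symlink().
 */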
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
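/*
 * These bits are encoded in the event's config field by perf; for
 * example (assuming ETM-capable hardware):
 *   perf record -e cs_etm/cycacc,timestamp/ ...
 */
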
static struct attribute *etm_config_formats_attr[] = {
        &format_attr_cycacc.attr,
        &format_attr_timestamp.attr,
        &format_attr_retstack.attr,
        NULL,
};

static const struct attribute_group etm_pmu_format_group = {
        .name = "format",
        .attrs = etm_config_formats_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
        &etm_pmu_format_group,
        NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
        return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
        return *etm_event_cpu_path_ptr(data, cpu);
}

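/* Trace data is conveyed via the AUX ring buffer, there is no count to read */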
static void etm_event_read(struct perf_event *event) {}

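/*
 * Filter state is allocated on the memory node of the event's CPU;
 * inherited events start with a copy of the parent's filters.
 */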
static int etm_addr_filters_alloc(struct perf_event *event)
{
        struct etm_filters *filters;
        int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

        filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
        if (!filters)
                return -ENOMEM;

        if (event->parent)
                memcpy(filters, event->parent->hw.addr_filters,
                       sizeof(*filters));

        event->hw.addr_filters = filters;

        return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
        kfree(event->hw.addr_filters);
        event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
        int ret = 0;

        if (event->attr.type != etm_pmu.type) {
                ret = -ENOENT;
                goto out;
        }

        ret = etm_addr_filters_alloc(event);
        if (ret)
                goto out;

        event->destroy = etm_event_destroy;
out:
        return ret;
}

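/*
 * Freeing is deferred to a workqueue since etm_free_aux() may run in
 * atomic context, where paths and sink buffers cannot be released.
 */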
static void free_event_data(struct work_struct *work)
{
        int cpu;
        cpumask_t *mask;
        struct etm_event_data *event_data;
        struct coresight_device *sink;

        event_data = container_of(work, struct etm_event_data, work);
        mask = &event_data->mask;

        /*
         * First deal with the sink configuration.  See comment in
         * etm_setup_aux() about why we take the first available path.
         */
        if (event_data->snk_config) {
                cpu = cpumask_first(mask);
                sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
                if (sink_ops(sink)->free_buffer)
                        sink_ops(sink)->free_buffer(event_data->snk_config);
        }

        for_each_cpu(cpu, mask) {
                struct list_head **ppath;

                ppath = etm_event_cpu_path_ptr(event_data, cpu);
                if (!(IS_ERR_OR_NULL(*ppath)))
                        coresight_release_path(*ppath);
                *ppath = NULL;
        }

        free_percpu(event_data->path);
        kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
        cpumask_t *mask;
        struct etm_event_data *event_data;

        /* First get memory for the session's data */
        event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
        if (!event_data)
                return NULL;

        /* Make sure nothing disappears under us */
        get_online_cpus();

        mask = &event_data->mask;
        if (cpu != -1)
                cpumask_set_cpu(cpu, mask);
        else
                cpumask_copy(mask, cpu_online_mask);
        put_online_cpus();

        /*
         * Each CPU has a single path between source and destination.  As such
         * allocate an array using CPU numbers as indexes.  That way a path
         * for any CPU can easily be accessed at any given time.  We proceed
         * the same way for sessions involving a single CPU.  The cost of
         * unused memory when dealing with single CPU trace scenarios is small
         * compared to the cost of searching through an optimized array.
         */
        event_data->path = alloc_percpu(struct list_head *);
        if (!event_data->path) {
                kfree(event_data);
                return NULL;
        }

        return event_data;
}

static void etm_free_aux(void *data)
{
        struct etm_event_data *event_data = data;

        schedule_work(&event_data->work);
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
                           int nr_pages, bool overwrite)
{
        int cpu = event->cpu;
        cpumask_t *mask;
        struct coresight_device *sink;
        struct etm_event_data *event_data = NULL;

        event_data = alloc_event_data(cpu);
        if (!event_data)
                return NULL;
        INIT_WORK(&event_data->work, free_event_data);

        /*
         * In theory nothing prevents tracers in a trace session from being
         * associated with different sinks, nor having a sink per tracer.  But
         * until we have HW with this kind of topology we need to assume tracers
         * in a trace session are using the same sink.  Therefore go through
         * the coresight bus and pick the first enabled sink.
         *
         * When operated from sysFS users are responsible for enabling the sink
         * while from perf, the perf tools will do it based on the choice made
         * on the cmd line.  As such the "enable_sink" flag in sysFS is reset.
         */
        sink = coresight_get_enabled_sink(true);
        if (!sink)
                goto err;

        mask = &event_data->mask;

        /* Setup the path for each CPU in a trace session */
        for_each_cpu(cpu, mask) {
                struct list_head *path;
                struct coresight_device *csdev;

                csdev = per_cpu(csdev_src, cpu);
                if (!csdev)
                        goto err;

                /*
                 * Building a path doesn't enable it, it simply builds a
                 * list of devices from source to sink that can be
                 * referenced later when the path is actually needed.
                 */
                path = coresight_build_path(csdev, sink);
                if (IS_ERR(path))
                        goto err;

                *etm_event_cpu_path_ptr(event_data, cpu) = path;
        }

        if (!sink_ops(sink)->alloc_buffer)
                goto err;

        cpu = cpumask_first(mask);
        /* Get the AUX specific data from the sink buffer */
        event_data->snk_config =
                        sink_ops(sink)->alloc_buffer(sink, cpu, pages,
                                                     nr_pages, overwrite);
        if (!event_data->snk_config)
                goto err;

out:
        return event_data;

err:
        etm_free_aux(event_data);
        event_data = NULL;
        goto out;
}

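/*
 * Events are started sink-first: claim the AUX buffer, configure the
 * sink, bring up the path and only then enable the tracer, so that the
 * sink is ready before trace data arrives.
 */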
static void etm_event_start(struct perf_event *event, int flags)
{
        int cpu = smp_processor_id();
        struct etm_event_data *event_data;
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct list_head *path;

        if (!csdev)
                goto fail;

        /*
         * Deal with the ring buffer API and get a handle on the
         * session's information.
         */
        event_data = perf_aux_output_begin(handle, event);
        if (!event_data)
                goto fail;

        path = etm_event_cpu_path(event_data, cpu);
        /* We need a sink, no need to continue without one */
        sink = coresight_get_sink(path);
        if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
                goto fail_end_stop;

        /* Configure the sink */
        if (sink_ops(sink)->set_buffer(sink, handle,
                                       event_data->snk_config))
                goto fail_end_stop;

        /* Nothing will happen without a path */
        if (coresight_enable_path(path, CS_MODE_PERF))
                goto fail_end_stop;

        /* Tell the perf core the event is alive */
        event->hw.state = 0;

        /* Finally enable the tracer */
        if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
                goto fail_disable_path;

out:
        return;

fail_disable_path:
        coresight_disable_path(path);
fail_end_stop:
        perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
        perf_aux_output_end(handle, 0);
fail:
        event->hw.state = PERF_HES_STOPPED;
        goto out;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
        int cpu = smp_processor_id();
        unsigned long size;
        struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
        struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
        struct etm_event_data *event_data = perf_get_aux(handle);
        struct list_head *path;

        if (event->hw.state == PERF_HES_STOPPED)
                return;

        if (!csdev)
                return;

        path = etm_event_cpu_path(event_data, cpu);
        if (!path)
                return;

        sink = coresight_get_sink(path);
        if (!sink)
                return;

        /* stop tracer */
        source_ops(csdev)->disable(csdev, event);

        /* tell the core */
        event->hw.state = PERF_HES_STOPPED;

        if (mode & PERF_EF_UPDATE) {
                if (WARN_ON_ONCE(handle->event != event))
                        return;

                /* update trace information */
                if (!sink_ops(sink)->update_buffer)
                        return;

                sink_ops(sink)->update_buffer(sink, handle,
                                              event_data->snk_config);

                if (!sink_ops(sink)->reset_buffer)
                        return;

                size = sink_ops(sink)->reset_buffer(sink, handle,
                                                    event_data->snk_config);

                perf_aux_output_end(handle, size);
        }

        /* Disabling the path makes its elements available to other sessions */
        coresight_disable_path(path);
}

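/*
 * perf core calls ->add() when an event is scheduled onto a CPU; unless
 * PERF_EF_START is set the event stays stopped until ->start() is called.
 */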
static int etm_event_add(struct perf_event *event, int mode)
{
        int ret = 0;
        struct hw_perf_event *hwc = &event->hw;

        if (mode & PERF_EF_START) {
                etm_event_start(event, 0);
                if (hwc->state & PERF_HES_STOPPED)
                        ret = -EINVAL;
        } else {
                hwc->state = PERF_HES_STOPPED;
        }

        return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
        etm_event_stop(event, PERF_EF_UPDATE);
}

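/*
 * Example perf filter strings (hypothetical addresses): a range filter
 * such as "filter 0x4000/0x1000" uses an address range comparator, while
 * "start 0x4000" or "stop 0x5000" use single address comparators.  The
 * two styles cannot be combined, as checked below.
 */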
static int etm_addr_filters_validate(struct list_head *filters)
{
        bool range = false, address = false;
        int index = 0;
        struct perf_addr_filter *filter;

        list_for_each_entry(filter, filters, entry) {
                /*
                 * No need to go further if there's no more
                 * room for filters.
                 */
                if (++index > ETM_ADDR_CMP_MAX)
                        return -EOPNOTSUPP;

                /* filter::size==0 means single address trigger */
                if (filter->size) {
                        /*
                         * The existing code relies on START/STOP filters
                         * being address filters.
                         */
                        if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
                            filter->action == PERF_ADDR_FILTER_ACTION_STOP)
                                return -EOPNOTSUPP;

                        range = true;
                } else
                        address = true;

                /*
                 * At this time we don't allow range and start/stop filtering
                 * to cohabit; they have to be mutually exclusive.
                 */
                if (range && address)
                        return -EOPNOTSUPP;
        }

        return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
        struct perf_addr_filters_head *head = perf_event_addr_filters(event);
        unsigned long start, stop, *offs = event->addr_filters_offs;
        struct etm_filters *filters = event->hw.addr_filters;
        struct etm_filter *etm_filter;
        struct perf_addr_filter *filter;
        int i = 0;

        list_for_each_entry(filter, &head->list, entry) {
                start = filter->offset + offs[i];
                stop = start + filter->size;
                etm_filter = &filters->etm_filter[i];

                switch (filter->action) {
                case PERF_ADDR_FILTER_ACTION_FILTER:
                        etm_filter->start_addr = start;
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_RANGE;
                        break;
                case PERF_ADDR_FILTER_ACTION_START:
                        etm_filter->start_addr = start;
                        etm_filter->type = ETM_ADDR_TYPE_START;
                        break;
                case PERF_ADDR_FILTER_ACTION_STOP:
                        etm_filter->stop_addr = stop;
                        etm_filter->type = ETM_ADDR_TYPE_STOP;
                        break;
                }
                i++;
        }

        filters->nr_filters = i;
}

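/*
 * Expose the tracer in sysfs, e.g. a "cpu0" link under the cs_etm PMU
 * directory, and record the per-CPU source used by etm_setup_aux().
 */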
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
        char entry[sizeof("cpu9999999")];
        int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
        struct device *pmu_dev = etm_pmu.dev;
        struct device *cs_dev = &csdev->dev;

        sprintf(entry, "cpu%d", cpu);

        if (!etm_perf_up)
                return -EPROBE_DEFER;

        if (link) {
                ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
                if (ret)
                        return ret;
                per_cpu(csdev_src, cpu) = csdev;
        } else {
                sysfs_remove_link(&pmu_dev->kobj, entry);
                per_cpu(csdev_src, cpu) = NULL;
        }

        return 0;
}

static int __init etm_perf_init(void)
{
        int ret;

        etm_pmu.capabilities = PERF_PMU_CAP_EXCLUSIVE;

        etm_pmu.attr_groups = etm_pmu_attr_groups;
        etm_pmu.task_ctx_nr = perf_sw_context;
        etm_pmu.read = etm_event_read;
        etm_pmu.event_init = etm_event_init;
        etm_pmu.setup_aux = etm_setup_aux;
        etm_pmu.free_aux = etm_free_aux;
        etm_pmu.start = etm_event_start;
        etm_pmu.stop = etm_event_stop;
        etm_pmu.add = etm_event_add;
        etm_pmu.del = etm_event_del;
        etm_pmu.addr_filters_sync = etm_addr_filters_sync;
        etm_pmu.addr_filters_validate = etm_addr_filters_validate;
        etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

        ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
        if (ret == 0)
                etm_perf_up = true;

        return ret;
}
device_initcall(etm_perf_init);
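
/*
 * Once the PMU is registered a trace session can be driven from perf,
 * e.g. (assuming CoreSight-capable hardware, with the sink name after
 * the '@' depending on the platform):
 *   perf record -e cs_etm/@tmc_etf0/ -- ls
 */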