/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, POLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However, since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here, which means we can miss a head update.
	 */

	if (!local_dec_and_test(&rb->nest))
		goto out;

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data, we do not
	 * store it.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	rb->user_page->data_head = head;

	/*
	 * Now check if we missed an update -- rely on previous implied
	 * compiler barriers to force a re-read.
	 */
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
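
/*
 * Illustrative sketch (not part of this file): the userspace side of the
 * pairing documented above, as a hypothetical reader of the mmap()ed
 * buffer. "up" is assumed to point at the perf_event_mmap_page, "data" at
 * the data area, "mask" is the data size minus one; consume() and the
 * barrier wrappers are made-up userspace helpers.
 *
 *	u64 head = up->data_head;		// LOAD ->data_head
 *	u64 tail = up->data_tail;
 *
 *	rmb();					// C, matches B
 *	while (tail != head) {
 *		struct perf_event_header *hdr;
 *
 *		hdr = (void *)(data + (tail & mask));
 *		consume(hdr);			// LOAD $data
 *		tail += hdr->size;
 *	}
 *	mb();					// D, matches A
 *	up->data_tail = tail;			// STORE ->data_tail
 */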
int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(!rb->nr_pages))
		goto out;

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite &&
		    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
			goto fail;

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below: the @tail load
		 * is required to compute the branch to the fail label below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		head += size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
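
/*
 * Illustrative sketch (not part of this file): how a caller typically
 * emits a record through the handle API. The two-field record here is
 * made up; real callers construct a struct perf_event_header plus the
 * sample payload.
 *
 *	struct perf_output_handle handle;
 *	struct perf_event_header header = {
 *		.type = PERF_RECORD_SAMPLE,
 *		.misc = 0,
 *		.size = sizeof(header) + sizeof(u64),
 *	};
 *	u64 payload = 42;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;		// no space; rb->lost has been bumped
 *
 *	perf_output_put(&handle, header);
 *	perf_output_put(&handle, payload);
 *	perf_output_end(&handle);
 */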
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);
}
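
/*
 * Worked example of the watermark default above (an illustration, not
 * from the original source): with 16 data pages of 4KiB each and
 * watermark == 0, perf_data_size() is 64KiB, so rb->watermark becomes
 * 32KiB and perf_output_begin() arms a wakeup every 32KiB of new data.
 */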
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb) || !atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * If rb::aux_mmap_count is zero (and rb_has_aux() above went through),
	 * the aux buffer is in perf_mmap_close(), about to get freed.
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err_put;

	/*
	 * Nesting is not supported for the AUX area; make sure nested
	 * writers are caught early.
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = local_read(&rb->aux_head);

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = ACCESS_ONCE(rb->user_page->aux_tail);
		handle->wakeup = local_read(&rb->aux_wakeup) + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * The handle->size computation depends on the aux_tail load;
		 * this forms a control dependency barrier separating the
		 * aux_tail load from the aux data stores that will be
		 * enabled on successful return.
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = 1;
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
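
/*
 * Illustrative sketch (not part of this file): the shape of a pmu
 * driver's use of the AUX handle API. hw_program_buffer() and
 * hw_stop_capture() are hypothetical driver hooks; "truncated" would
 * reflect a hardware overflow indication.
 *
 *	// in pmu::start (or wherever the capture is armed):
 *	void *buf = perf_aux_output_begin(&handle, event);
 *	if (!buf)
 *		return;		// no space, or the buffer is going away
 *	hw_program_buffer(buf, handle.head, handle.size);
 *
 *	// later, in pmu::stop or the PMI handler; the driver must order
 *	// the hardware's AUX stores here, standing in for (B):
 *	size = hw_stop_capture(&truncated);
 *	perf_aux_output_end(&handle, size, truncated);
 */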
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
			 bool truncated)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;
	u64 flags = 0;

	if (truncated)
		flags |= PERF_AUX_FLAG_TRUNCATED;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		local_set(&rb->aux_head, aux_head);
	} else {
		aux_head = local_read(&rb->aux_head);
		local_add(size, &rb->aux_head);
	}

	if (size || flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */

		perf_event_aux_event(handle->event, aux_head, size, flags);
	}

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);

	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
	}
	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	if (size > handle->size)
		return -ENOSPC;

	local_add(size, &rb->aux_head);

	aux_head = rb->user_page->aux_head = local_read(&rb->aux_head);
	if (aux_head - local_read(&rb->aux_wakeup) >= rb->aux_watermark) {
		perf_output_wakeup(handle);
		local_add(rb->aux_watermark, &rb->aux_wakeup);
		handle->wakeup = local_read(&rb->aux_wakeup) +
				 rb->aux_watermark;
	}

	handle->head = aux_head;
	handle->size -= size;

	return 0;
}
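
/*
 * For example (illustrative, not from the original source), a pmu driver
 * whose hardware requires a 64-byte aligned write pointer could pad with:
 *
 *	pad = ALIGN(handle->head, 64) - handle->head;
 *	if (pad && perf_aux_output_skip(handle, pad))
 *		goto stop;	// not enough space left in the handle
 */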
void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
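
/*
 * To recover the geometry later, a consumer of these pages checks the
 * first page's private field, e.g. (illustrative):
 *
 *	if (PagePrivate(page))
 *		nr_sub_pages = 1 << page_private(page);	// high-order chunk
 *	else
 *		nr_sub_pages = 1;			// plain page
 *
 * rb_alloc_aux() below relies on exactly this when it records the chunk's
 * sub-pages in rb->aux_pages[].
 */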
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}
static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -ENOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunk of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}
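
	/*
	 * Worked example of the max_order logic above (illustrative): for
	 * nr_pages = 32 (128KiB of 4KiB pages), max_order = ilog2(32) = 5,
	 * i.e. the whole buffer may be one chunk. With SW double buffering
	 * in non-overwrite mode, max_order drops to 4, so the buffer is
	 * carved into at least two 64KiB chunks the PMU can flip between.
	 */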
	rb->aux_pages = kzalloc_node(nr_pages * sizeof(void *), GFP_KERNEL, node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}
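
	/*
	 * At this point aux_pages[] holds one virtual address per PAGE_SIZE
	 * sub-page. For example (illustrative), an order-2 chunk from
	 * rb_alloc_aux_page() contributes four consecutive entries and
	 * advances aux_nr_pages by four.
	 */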
	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	/* default to waking up after half the AUX buffer has filled */
	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}
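
/*
 * The mapping above (illustrative recap): page offset 0 is the user
 * control page, offsets 1..nr_pages map to data_pages[pgoff - 1], and
 * anything beyond that returns NULL, making the access fault.
 */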
static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

	free_page((unsigned long)rb->user_page);

fail_user_page:
	kfree(rb);

fail:
	return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}

#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}
void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff)
			return virt_to_page(rb->aux_pages[pgoff - rb->aux_pgoff]);
	}

	return __perf_mmap_to_page(rb, pgoff);
}
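
/*
 * Overall layout of the mmap()ed area resolved above (illustrative
 * recap): page offset 0 is the user control page, the data pages follow
 * immediately, and the AUX pages, when allocated, live at rb->aux_pgoff
 * and are handled here before falling back to __perf_mmap_to_page().
 */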