/*
 * Performance events ring-buffer code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */
#include <linux/perf_event.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/circ_buf.h>
#include <linux/poll.h>
#include <linux/nospec.h>

#include "internal.h"
static void perf_output_wakeup(struct perf_output_handle *handle)
{
	atomic_set(&handle->rb->poll, EPOLLIN);

	handle->event->pending_wakeup = 1;
	irq_work_queue(&handle->event->pending);
}
/*
 * We need to ensure a later event_id doesn't publish a head when a former
 * event isn't done writing. However since we need to deal with NMIs we
 * cannot fully serialize things.
 *
 * We only publish the head (and generate a wakeup) when the outer-most
 * event completes.
 */
static void perf_output_get_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;

	preempt_disable();
	local_inc(&rb->nest);
	handle->wakeup = local_read(&rb->wakeup);
}
static void perf_output_put_handle(struct perf_output_handle *handle)
{
	struct ring_buffer *rb = handle->rb;
	unsigned long head;

again:
	/*
	 * In order to avoid publishing a head value that goes backwards,
	 * we must ensure the load of @rb->head happens after we've
	 * incremented @rb->nest.
	 *
	 * Otherwise we can observe a @rb->head value before one published
	 * by an IRQ/NMI happening between the load and the increment.
	 */
	barrier();
	head = local_read(&rb->head);

	/*
	 * IRQ/NMI can happen here and advance @rb->head, causing our
	 * load above to be stale.
	 */

	/*
	 * If this isn't the outermost nesting, we don't have to update
	 * @rb->user_page->data_head.
	 */
	if (local_read(&rb->nest) > 1) {
		local_dec(&rb->nest);
		goto out;
	}

	/*
	 * Since the mmap() consumer (userspace) can run on a different CPU:
	 *
	 *   kernel				user
	 *
	 *   if (LOAD ->data_tail) {		LOAD ->data_head
	 *			(A)		smp_rmb()	(C)
	 *	STORE $data			LOAD $data
	 *	smp_wmb()	(B)		smp_mb()	(D)
	 *	STORE ->data_head		STORE ->data_tail
	 *   }
	 *
	 * Where A pairs with D, and B pairs with C.
	 *
	 * In our case (A) is a control dependency that separates the load of
	 * the ->data_tail and the stores of $data. In case ->data_tail
	 * indicates there is no room in the buffer to store $data we do not.
	 *
	 * D needs to be a full barrier since it separates the data READ
	 * from the tail WRITE.
	 *
	 * For B a WMB is sufficient since it separates two WRITEs, and for C
	 * an RMB is sufficient since it separates two READs.
	 *
	 * See perf_output_begin().
	 */
	smp_wmb(); /* B, matches C */
	WRITE_ONCE(rb->user_page->data_head, head);

	/*
	 * We must publish the head before decrementing the nest count,
	 * otherwise an IRQ/NMI can publish a more recent head value and our
	 * write will (temporarily) publish a stale value.
	 */
	barrier();
	local_set(&rb->nest, 0);

	/*
	 * Ensure we decrement @rb->nest before we validate the @rb->head.
	 * Otherwise we cannot be sure we caught the 'last' nested update.
	 */
	barrier();
	if (unlikely(head != local_read(&rb->head))) {
		local_inc(&rb->nest);
		goto again;
	}

	if (handle->wakeup != local_read(&rb->wakeup))
		perf_output_wakeup(handle);

out:
	preempt_enable();
}
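/*
 * For reference, a userspace consumer of the mmap()ed buffer would pair
 * with the above roughly as follows (illustrative sketch, not part of
 * this file; "up" is the mapped struct perf_event_mmap_page, "base" and
 * "mask" describe the data area and are our names):
 *
 *	u64 head = READ_ONCE(up->data_head);
 *	rmb();				// (C), matches (B)
 *	// consume records in [tail, head); each starts with a
 *	// struct perf_event_header giving its size
 *	mb();				// (D), matches (A)
 *	WRITE_ONCE(up->data_tail, head);
 */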
static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
		      unsigned long data_size, unsigned int size,
		      bool backward)
{
	if (!backward)
		return CIRC_SPACE(head, tail, data_size) >= size;
	else
		return CIRC_SPACE(tail, head, data_size) >= size;
}
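/*
 * With a forward writer the producer index is @head; a backward writer
 * moves down from the end of the buffer, so the roles of @head and @tail
 * swap. E.g. for data_size = 8, head = 6, tail = 2, a forward writer has
 * CIRC_SPACE(6, 2, 8) = 3 bytes left before it would catch up with the
 * tail.
 */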
static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
		    struct perf_event *event, unsigned int size,
		    bool backward)
{
	struct ring_buffer *rb;
	unsigned long tail, offset, head;
	int have_lost, page_shift;
	struct {
		struct perf_event_header header;
		u64			 id;
		u64			 lost;
	} lost_event;

	rcu_read_lock();
	/*
	 * For inherited events we send all the output towards the parent.
	 */
	if (event->parent)
		event = event->parent;

	rb = rcu_dereference(event->rb);
	if (unlikely(!rb))
		goto out;

	if (unlikely(rb->paused)) {
		if (rb->nr_pages)
			local_inc(&rb->lost);
		goto out;
	}

	handle->rb    = rb;
	handle->event = event;

	have_lost = local_read(&rb->lost);
	if (unlikely(have_lost)) {
		size += sizeof(lost_event);
		if (event->attr.sample_id_all)
			size += event->id_header_size;
	}

	perf_output_get_handle(handle);

	do {
		tail = READ_ONCE(rb->user_page->data_tail);
		offset = head = local_read(&rb->head);
		if (!rb->overwrite) {
			if (unlikely(!ring_buffer_has_space(head, tail,
							    perf_data_size(rb),
							    size, backward)))
				goto fail;
		}

		/*
		 * The above forms a control dependency barrier separating the
		 * @tail load above from the data stores below. Since the @tail
		 * load is required to compute the branch to fail below.
		 *
		 * A, matches D; the full memory barrier userspace SHOULD issue
		 * after reading the data and before storing the new tail
		 * position.
		 *
		 * See perf_output_put_handle().
		 */

		if (!backward)
			head += size;
		else
			head -= size;
	} while (local_cmpxchg(&rb->head, offset, head) != offset);

	if (backward) {
		offset = head;
		head = (u64)(-head);
	}

	/*
	 * We rely on the implied barrier() by local_cmpxchg() to ensure
	 * none of the data stores below can be lifted up by the compiler.
	 */

	if (unlikely(head - local_read(&rb->wakeup) > rb->watermark))
		local_add(rb->watermark, &rb->wakeup);

	page_shift = PAGE_SHIFT + page_order(rb);

	handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
	offset &= (1UL << page_shift) - 1;
	handle->addr = rb->data_pages[handle->page] + offset;
	handle->size = (1UL << page_shift) - offset;

	if (unlikely(have_lost)) {
		struct perf_sample_data sample_data;

		lost_event.header.size = sizeof(lost_event);
		lost_event.header.type = PERF_RECORD_LOST;
		lost_event.header.misc = 0;
		lost_event.id          = event->id;
		lost_event.lost        = local_xchg(&rb->lost, 0);

		perf_event_header__init_id(&lost_event.header,
					   &sample_data, event);
		perf_output_put(handle, lost_event);
		perf_event__output_id_sample(event, handle, &sample_data);
	}

	return 0;

fail:
	local_inc(&rb->lost);
	perf_output_put_handle(handle);
out:
	rcu_read_unlock();

	return -ENOSPC;
}
int perf_output_begin_forward(struct perf_output_handle *handle,
			      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, false);
}

int perf_output_begin_backward(struct perf_output_handle *handle,
			       struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size, true);
}

int perf_output_begin(struct perf_output_handle *handle,
		      struct perf_event *event, unsigned int size)
{
	return __perf_output_begin(handle, event, size,
				   unlikely(is_write_backward(event)));
}
unsigned int perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len)
{
	return __output_copy(handle, buf, len);
}

unsigned int perf_output_skip(struct perf_output_handle *handle,
			      unsigned int len)
{
	return __output_skip(handle, NULL, len);
}

void perf_output_end(struct perf_output_handle *handle)
{
	perf_output_put_handle(handle);
	rcu_read_unlock();
}
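/*
 * Typical producer sequence, along the lines of the sample-output code in
 * kernel/events/core.c (sketch only; error handling elided):
 *
 *	struct perf_output_handle handle;
 *
 *	if (perf_output_begin(&handle, event, header.size))
 *		return;				// no room; accounted in rb->lost
 *	perf_output_put(&handle, header);	// wraps perf_output_copy()
 *	...					// further perf_output_copy() calls
 *	perf_output_end(&handle);		// publish data_head, maybe wake up
 */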
static void
ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
{
	long max_size = perf_data_size(rb);

	if (watermark)
		rb->watermark = min(max_size, watermark);

	if (!rb->watermark)
		rb->watermark = max_size / 2;

	if (flags & RING_BUFFER_WRITABLE)
		rb->overwrite = 0;
	else
		rb->overwrite = 1;

	atomic_set(&rb->refcount, 1);

	INIT_LIST_HEAD(&rb->event_list);
	spin_lock_init(&rb->event_lock);

	/*
	 * perf_output_begin() only checks rb->paused, therefore
	 * rb->paused must be true if we have no pages for output.
	 */
	if (!rb->nr_pages)
		rb->paused = 1;
}
void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags)
{
	/*
	 * OVERWRITE is determined by perf_aux_output_end() and can't
	 * be passed in directly.
	 */
	if (WARN_ON_ONCE(flags & PERF_AUX_FLAG_OVERWRITE))
		return;

	handle->aux_flags |= flags;
}
EXPORT_SYMBOL_GPL(perf_aux_output_flag);
/*
 * This is called before hardware starts writing to the AUX area to
 * obtain an output handle and make sure there's room in the buffer.
 * When the capture completes, call perf_aux_output_end() to commit
 * the recorded data to the buffer.
 *
 * The ordering is similar to that of perf_output_{begin,end}, with
 * the exception of (B), which should be taken care of by the pmu
 * driver, since ordering rules will differ depending on hardware.
 *
 * Call this from pmu::start(); see the comment in perf_aux_output_end()
 * about its use in pmu callbacks. Both can also be called from the PMI
 * handler if needed.
 */
void *perf_aux_output_begin(struct perf_output_handle *handle,
			    struct perf_event *event)
{
	struct perf_event *output_event = event;
	unsigned long aux_head, aux_tail;
	struct ring_buffer *rb;

	if (output_event->parent)
		output_event = output_event->parent;

	/*
	 * Since this will typically be open across pmu::add/pmu::del, we
	 * grab ring_buffer's refcount instead of holding rcu read lock
	 * to make sure it doesn't disappear under us.
	 */
	rb = ring_buffer_get(output_event);
	if (!rb)
		return NULL;

	if (!rb_has_aux(rb))
		goto err;

	/*
	 * If aux_mmap_count is zero, the aux buffer is in perf_mmap_close(),
	 * about to get freed, so we leave immediately.
	 *
	 * Checking rb::aux_mmap_count and rb::refcount has to be done in
	 * the same order, see perf_mmap_close. Otherwise we end up freeing
	 * aux pages in this path, which is a bug, because in_atomic().
	 */
	if (!atomic_read(&rb->aux_mmap_count))
		goto err;

	if (!atomic_inc_not_zero(&rb->aux_refcount))
		goto err;

	/*
	 * Nesting is not supported for AUX area, make sure nested
	 * writers are caught early
	 */
	if (WARN_ON_ONCE(local_xchg(&rb->aux_nest, 1)))
		goto err_put;

	aux_head = rb->aux_head;

	handle->rb = rb;
	handle->event = event;
	handle->head = aux_head;
	handle->size = 0;
	handle->aux_flags = 0;

	/*
	 * In overwrite mode, AUX data stores do not depend on aux_tail,
	 * therefore (A) control dependency barrier does not exist. The
	 * (B) <-> (C) ordering is still observed by the pmu driver.
	 */
	if (!rb->aux_overwrite) {
		aux_tail = READ_ONCE(rb->user_page->aux_tail);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
		if (aux_head - aux_tail < perf_aux_size(rb))
			handle->size = CIRC_SPACE(aux_head, aux_tail, perf_aux_size(rb));

		/*
		 * handle->size computation depends on aux_tail load; this forms a
		 * control dependency barrier separating aux_tail load from aux data
		 * store that will be enabled on successful return
		 */
		if (!handle->size) { /* A, matches D */
			event->pending_disable = smp_processor_id();
			perf_output_wakeup(handle);
			local_set(&rb->aux_nest, 0);
			goto err_put;
		}
	}

	return handle->rb->aux_priv;

err_put:
	/* can't be last */
	rb_free_aux(rb);

err:
	ring_buffer_put(rb);
	handle->event = NULL;

	return NULL;
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
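/*
 * Sketch of the expected pmu::start() flow (illustrative only; "xxx" and
 * its buffer type are made-up driver names, "handle" is a per-CPU
 * perf_output_handle owned by the driver):
 *
 *	static void xxx_pmu_start(struct perf_event *event, int flags)
 *	{
 *		void *buf = perf_aux_output_begin(handle, event);
 *
 *		if (!buf)
 *			return;
 *		// program the hardware to write into the AUX pages,
 *		// within the [handle->head, handle->head + handle->size)
 *		// window
 *	}
 */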
static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
{
	if (rb->aux_overwrite)
		return false;

	if (rb->aux_head - rb->aux_wakeup >= rb->aux_watermark) {
		rb->aux_wakeup = rounddown(rb->aux_head, rb->aux_watermark);
		return true;
	}

	return false;
}
/*
 * Commit the data written by hardware into the ring buffer by adjusting
 * aux_head and posting a PERF_RECORD_AUX into the perf buffer. It is the
 * pmu driver's responsibility to observe ordering rules of the hardware,
 * so that all the data is externally visible before this is called.
 *
 * Note: this has to be called from pmu::stop() callback, as the assumption
 * of the AUX buffer management code is that after pmu::stop(), the AUX
 * transaction must be stopped and therefore drop the AUX reference count.
 */
void perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
{
	bool wakeup = !!(handle->aux_flags & PERF_AUX_FLAG_TRUNCATED);
	struct ring_buffer *rb = handle->rb;
	unsigned long aux_head;

	/* in overwrite mode, driver provides aux_head via handle */
	if (rb->aux_overwrite) {
		handle->aux_flags |= PERF_AUX_FLAG_OVERWRITE;

		aux_head = handle->head;
		rb->aux_head = aux_head;
	} else {
		handle->aux_flags &= ~PERF_AUX_FLAG_OVERWRITE;

		aux_head = rb->aux_head;
		rb->aux_head += size;
	}

	if (size || handle->aux_flags) {
		/*
		 * Only send RECORD_AUX if we have something useful to communicate
		 */
		perf_event_aux_event(handle->event, aux_head, size,
				     handle->aux_flags);
	}

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb))
		wakeup = true;

	if (wakeup) {
		if (handle->aux_flags & PERF_AUX_FLAG_TRUNCATED)
			handle->event->pending_disable = smp_processor_id();
		perf_output_wakeup(handle);
	}

	handle->event = NULL;

	local_set(&rb->aux_nest, 0);
	/* can't be last */
	rb_free_aux(rb);
	ring_buffer_put(rb);
}
EXPORT_SYMBOL_GPL(perf_aux_output_end);
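/*
 * The matching pmu::stop() would then look roughly like this (again
 * illustrative; the driver must first drain/serialize the hardware,
 * which stands in for barrier (B)):
 *
 *	static void xxx_pmu_stop(struct perf_event *event, int flags)
 *	{
 *		unsigned long size = ...;	// bytes the hardware wrote
 *
 *		if (hw_overflowed)
 *			perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
 *		perf_aux_output_end(handle, size);
 *	}
 */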
/*
 * Skip over a given number of bytes in the AUX buffer, due to, for example,
 * hardware's alignment constraints.
 */
int perf_aux_output_skip(struct perf_output_handle *handle, unsigned long size)
{
	struct ring_buffer *rb = handle->rb;

	if (size > handle->size)
		return -ENOSPC;

	rb->aux_head += size;

	WRITE_ONCE(rb->user_page->aux_head, rb->aux_head);
	if (rb_need_aux_wakeup(rb)) {
		perf_output_wakeup(handle);
		handle->wakeup = rb->aux_wakeup + rb->aux_watermark;
	}

	handle->head = rb->aux_head;
	handle->size -= size;

	return 0;
}
EXPORT_SYMBOL_GPL(perf_aux_output_skip);
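/*
 * E.g. a driver whose hardware must start writing on a page boundary
 * might, right after perf_aux_output_begin() (sketch with made-up names):
 *
 *	unsigned long pad = PAGE_ALIGN(handle->head) - handle->head;
 *
 *	if (pad && perf_aux_output_skip(handle, pad))
 *		goto no_space;
 */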
void *perf_get_aux(struct perf_output_handle *handle)
{
	/* this is only valid between perf_aux_output_begin and *_end */
	if (!handle->event)
		return NULL;

	return handle->rb->aux_priv;
}
EXPORT_SYMBOL_GPL(perf_get_aux);
#define PERF_AUX_GFP	(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY)

static struct page *rb_alloc_aux_page(int node, int order)
{
	struct page *page;

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	do {
		page = alloc_pages_node(node, PERF_AUX_GFP, order);
	} while (!page && order--);

	if (page && order) {
		/*
		 * Communicate the allocation size to the driver:
		 * if we managed to secure a high-order allocation,
		 * set its first page's private to this order;
		 * !PagePrivate(page) means it's just a normal page.
		 */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);
	}

	return page;
}
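/*
 * A PMU driver walking the pages later handed to pmu::setup_aux() can
 * recover the chunk geometry the same way, along the lines of:
 *
 *	nr = PagePrivate(page) ? 1 << page_private(page) : 1;
 */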
static void rb_free_aux_page(struct ring_buffer *rb, int idx)
{
	struct page *page = virt_to_page(rb->aux_pages[idx]);

	ClearPagePrivate(page);
	page->mapping = NULL;
	__free_page(page);
}
static void __rb_free_aux(struct ring_buffer *rb)
{
	int pg;

	/*
	 * Should never happen, the last reference should be dropped from
	 * perf_mmap_close() path, which first stops aux transactions (which
	 * in turn are the atomic holders of aux_refcount) and then does the
	 * last rb_free_aux().
	 */
	WARN_ON_ONCE(in_atomic());

	if (rb->aux_priv) {
		rb->free_aux(rb->aux_priv);
		rb->free_aux = NULL;
		rb->aux_priv = NULL;
	}

	if (rb->aux_nr_pages) {
		for (pg = 0; pg < rb->aux_nr_pages; pg++)
			rb_free_aux_page(rb, pg);

		kfree(rb->aux_pages);
		rb->aux_nr_pages = 0;
	}
}
int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
		 pgoff_t pgoff, int nr_pages, long watermark, int flags)
{
	bool overwrite = !(flags & RING_BUFFER_WRITABLE);
	int node = (event->cpu == -1) ? -1 : cpu_to_node(event->cpu);
	int ret = -ENOMEM, max_order = 0;

	if (!has_aux(event))
		return -EOPNOTSUPP;

	if (event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) {
		/*
		 * We need to start with the max_order that fits in nr_pages,
		 * not the other way around, hence ilog2() and not get_order.
		 */
		max_order = ilog2(nr_pages);

		/*
		 * PMU requests more than one contiguous chunks of memory
		 * for SW double buffering
		 */
		if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_SW_DOUBLEBUF) &&
		    !overwrite) {
			if (!max_order)
				return -EINVAL;

			max_order--;
		}
	}

	rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,
				     node);
	if (!rb->aux_pages)
		return -ENOMEM;

	rb->free_aux = event->pmu->free_aux;
	for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {
		struct page *page;
		int last, order;

		order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));
		page = rb_alloc_aux_page(node, order);
		if (!page)
			goto out;

		for (last = rb->aux_nr_pages + (1 << page_private(page));
		     last > rb->aux_nr_pages; rb->aux_nr_pages++)
			rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
	}

	/*
	 * In overwrite mode, PMUs that don't support SG may not handle more
	 * than one contiguous allocation, since they rely on PMI to do double
	 * buffering. In this case, the entire buffer has to be one contiguous
	 * chunk.
	 */
	if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
	    overwrite) {
		struct page *page = virt_to_page(rb->aux_pages[0]);

		if (page_private(page) != max_order)
			goto out;
	}

	rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,
					     overwrite);
	if (!rb->aux_priv)
		goto out;

	ret = 0;

	/*
	 * aux_pages (and pmu driver's private data, aux_priv) will be
	 * referenced in both producer's and consumer's contexts, thus
	 * we keep a refcount here to make sure either of the two can
	 * reference them safely.
	 */
	atomic_set(&rb->aux_refcount, 1);

	rb->aux_overwrite = overwrite;
	rb->aux_watermark = watermark;

	/* default the watermark to half the buffer when none was asked for */
	if (!rb->aux_watermark && !rb->aux_overwrite)
		rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);

out:
	if (!ret)
		rb->aux_pgoff = pgoff;
	else
		__rb_free_aux(rb);

	return ret;
}
void rb_free_aux(struct ring_buffer *rb)
{
	if (atomic_dec_and_test(&rb->aux_refcount))
		__rb_free_aux(rb);
}
#ifndef CONFIG_PERF_USE_VMALLOC

/*
 * Back perf_mmap() with regular GFP_KERNEL-0 pages.
 */

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (pgoff > rb->nr_pages)
		return NULL;

	if (pgoff == 0)
		return virt_to_page(rb->user_page);

	return virt_to_page(rb->data_pages[pgoff - 1]);
}

static void *perf_mmap_alloc_page(int cpu)
{
	struct page *page;
	int node;

	node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
	if (!page)
		return NULL;

	return page_address(page);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	int i;

	size = sizeof(struct ring_buffer);
	size += nr_pages * sizeof(void *);

	if (order_base_2(size) >= PAGE_SHIFT+MAX_ORDER)
		goto fail;

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	rb->user_page = perf_mmap_alloc_page(cpu);
	if (!rb->user_page)
		goto fail_user_page;

	for (i = 0; i < nr_pages; i++) {
		rb->data_pages[i] = perf_mmap_alloc_page(cpu);
		if (!rb->data_pages[i])
			goto fail_data_pages;
	}

	rb->nr_pages = nr_pages;

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_data_pages:
	for (i--; i >= 0; i--)
		free_page((unsigned long)rb->data_pages[i]);

fail_user_page:
	free_page((unsigned long)rb->user_page);

	kfree(rb);

fail:
	return NULL;
}
static void perf_mmap_free_page(unsigned long addr)
{
	struct page *page = virt_to_page((void *)addr);

	page->mapping = NULL;
	__free_page(page);
}

void rb_free(struct ring_buffer *rb)
{
	int i;

	perf_mmap_free_page((unsigned long)rb->user_page);
	for (i = 0; i < rb->nr_pages; i++)
		perf_mmap_free_page((unsigned long)rb->data_pages[i]);
	kfree(rb);
}
#else
static int data_page_nr(struct ring_buffer *rb)
{
	return rb->nr_pages << page_order(rb);
}

static struct page *
__perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	/* The '>' counts in the user page. */
	if (pgoff > data_page_nr(rb))
		return NULL;

	return vmalloc_to_page((void *)rb->user_page + pgoff * PAGE_SIZE);
}
static void perf_mmap_unmark_page(void *addr)
{
	struct page *page = vmalloc_to_page(addr);

	page->mapping = NULL;
}
static void rb_free_work(struct work_struct *work)
{
	struct ring_buffer *rb;
	void *base;
	int i, nr;

	rb = container_of(work, struct ring_buffer, work);
	nr = data_page_nr(rb);

	base = rb->user_page;
	/* The '<=' counts in the user page. */
	for (i = 0; i <= nr; i++)
		perf_mmap_unmark_page(base + (i * PAGE_SIZE));

	vfree(base);
	kfree(rb);
}

void rb_free(struct ring_buffer *rb)
{
	schedule_work(&rb->work);
}
struct ring_buffer *rb_alloc(int nr_pages, long watermark, int cpu, int flags)
{
	struct ring_buffer *rb;
	unsigned long size;
	void *all_buf;

	size = sizeof(struct ring_buffer);
	size += sizeof(void *);

	rb = kzalloc(size, GFP_KERNEL);
	if (!rb)
		goto fail;

	INIT_WORK(&rb->work, rb_free_work);

	all_buf = vmalloc_user((nr_pages + 1) * PAGE_SIZE);
	if (!all_buf)
		goto fail_all_buf;

	rb->user_page = all_buf;
	rb->data_pages[0] = all_buf + PAGE_SIZE;
	if (nr_pages) {
		rb->nr_pages = 1;
		rb->page_order = ilog2(nr_pages);
	}

	ring_buffer_init(rb, watermark, flags);

	return rb;

fail_all_buf:
	kfree(rb);

fail:
	return NULL;
}

#endif
struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff)
{
	if (rb->aux_nr_pages) {
		/* above AUX space */
		if (pgoff > rb->aux_pgoff + rb->aux_nr_pages)
			return NULL;

		/* AUX space */
		if (pgoff >= rb->aux_pgoff) {
			int aux_pgoff = array_index_nospec(pgoff - rb->aux_pgoff, rb->aux_nr_pages);
			return virt_to_page(rb->aux_pages[aux_pgoff]);
		}
	}

	return __perf_mmap_to_page(rb, pgoff);
}
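/*
 * Layout recap for the pgoff arithmetic above:
 *
 *	pgoff 0						user page
 *	pgoff 1 .. nr_pages				data pages
 *	pgoff aux_pgoff .. aux_pgoff + aux_nr_pages	AUX pages
 *
 * array_index_nospec() clamps the AUX index under speculation, since
 * @pgoff ultimately comes from a user-controlled fault address.
 */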