*/
void tracing_on(void)
{
- ring_buffers_off = 0;
+ set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
+ EXPORT_SYMBOL_GPL(tracing_on);
/**
* tracing_off - turn off all tracing buffers
*/
void tracing_off(void)
{
- ring_buffers_off = 1;
+ clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}
+ EXPORT_SYMBOL_GPL(tracing_off);
+/**
+ * tracing_off_permanent - permanently disable ring buffers
+ *
+ * This function, once called, will disable all ring buffers
+ * permanently.
+ */
+void tracing_off_permanent(void)
+{
+ set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
+}
+
+#include "trace.h"
+
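For reference, the on/off/permanent states live in one word so the hot path can gate on a single compare instead of two test_bit() calls. A minimal sketch, assuming the bit values and the reserve-time check (neither is shown in this hunk):

	/* assumed bit layout behind the set_bit()/clear_bit() calls above */
	enum {
		RB_BUFFERS_ON_BIT	= 0,
		RB_BUFFERS_DISABLED_BIT	= 1,
	};

	#define RB_BUFFERS_ON	(1 << RB_BUFFERS_ON_BIT)

	unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

	/* e.g. at the top of ring_buffer_lock_reserve(): anything other
	 * than "on and not permanently disabled" rejects the write */
	if (unlikely(ring_buffer_flags != RB_BUFFERS_ON))
		return NULL;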
/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0
return event;
out:
- if (resched)
- preempt_enable_no_resched_notrace();
- else
- preempt_enable_notrace();
+ ftrace_preempt_enable(resched);
return NULL;
}
+ EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
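The ftrace_preempt_enable(resched) call pairs with an ftrace_preempt_disable() earlier in the function; a sketch of the helper pair assumed to live in trace.h, reconstructed from the open-coded sequence it replaces:

	static inline int ftrace_preempt_disable(void)
	{
		int resched;

		resched = need_resched();
		preempt_disable_notrace();

		return resched;
	}

	static inline void ftrace_preempt_enable(int resched)
	{
		if (resched)
			preempt_enable_no_resched_notrace();
		else
			preempt_enable_notrace();
	}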
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
struct ring_buffer_event *event)
return overruns;
}
+ EXPORT_SYMBOL_GPL(ring_buffer_overruns);
-/**
- * ring_buffer_iter_reset - reset an iterator
- * @iter: The iterator to reset
- *
- * Resets the iterator, so that it will start from the beginning
- * again.
- */
-void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+static void rb_iter_reset(struct ring_buffer_iter *iter)
{
struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
if (iter->head)
iter->read_stamp = cpu_buffer->read_stamp;
else
- iter->read_stamp = iter->head_page->time_stamp;
+ iter->read_stamp = iter->head_page->page->time_stamp;
+}
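The extra ->page dereference reflects splitting the per-page bookkeeping from the data page handed to readers. A sketch of the layout assumed by the accesses in this patch (the exact field set is inferred, not shown here):

	struct buffer_data_page {
		u64		time_stamp;	/* page time stamp */
		local_t		commit;		/* committed write index */
		unsigned char	data[];		/* event records */
	};

	struct buffer_page {
		local_t		write;		/* next write index */
		unsigned	read;		/* next read index */
		struct list_head list;		/* list of buffer pages */
		struct buffer_data_page	*page;	/* the actual data page */
	};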
+
+/**
+ * ring_buffer_iter_reset - reset an iterator
+ * @iter: The iterator to reset
+ *
+ * Resets the iterator, so that it will start from the beginning
+ * again.
+ */
+void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ rb_iter_reset(iter);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
/**
* ring_buffer_iter_empty - check if an iterator has no more to read
return NULL;
}
-/**
- * ring_buffer_iter_peek - peek at the next event to be read
- * @iter: The ring buffer iterator
- * @ts: The timestamp counter of this event.
- *
- * This will return the event that will be read next, but does
- * not increment the iterator.
- */
-struct ring_buffer_event *
-ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+static struct ring_buffer_event *
+rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
struct ring_buffer *buffer;
struct ring_buffer_per_cpu *cpu_buffer;
return NULL;
}
/**
+ * ring_buffer_peek - peek at the next event to be read
+ * @buffer: The ring buffer to read
+ * @cpu: The cpu to peek at
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not consume the data.
+ */
+struct ring_buffer_event *
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ event = rb_buffer_peek(buffer, cpu, ts);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return event;
+}
+ EXPORT_SYMBOL_GPL(ring_buffer_peek);
+
+/**
+ * ring_buffer_iter_peek - peek at the next event to be read
+ * @iter: The ring buffer iterator
+ * @ts: The timestamp counter of this event.
+ *
+ * This will return the event that will be read next, but does
+ * not increment the iterator.
+ */
+struct ring_buffer_event *
+ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
+ struct ring_buffer_event *event;
+ unsigned long flags;
+
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+ event = rb_iter_peek(iter, ts);
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return event;
+}
+ EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
+
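A hypothetical use of the non-destructive read: peek at the next event, and only consume it if it qualifies (deadline and the surrounding buffer/cpu variables are illustrative, not from this patch):

	u64 ts;
	struct ring_buffer_event *event;

	event = ring_buffer_peek(buffer, cpu, &ts);
	if (event && ts < deadline)
		event = ring_buffer_consume(buffer, cpu, &ts);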
+/**
* ring_buffer_consume - return an event and consume it
* @buffer: The ring buffer to get the next event from
*
if (!cpu_isset(cpu, buffer->cpumask))
return NULL;
- event = ring_buffer_peek(buffer, cpu, ts);
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ event = rb_buffer_peek(buffer, cpu, ts);
if (!event)
- return NULL;
+ goto out;
- cpu_buffer = buffer->buffers[cpu];
rb_advance_reader(cpu_buffer);
+ out:
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
return event;
}
+ EXPORT_SYMBOL_GPL(ring_buffer_consume);
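A hedged consumer sketch: ring_buffer_event_data() and ring_buffer_event_length() are the public event accessors, while handle_record() stands in for whatever the caller does with each record:

	u64 ts;
	struct ring_buffer_event *event;

	while ((event = ring_buffer_consume(buffer, cpu, &ts))) {
		void *data = ring_buffer_event_data(event);
		unsigned len = ring_buffer_event_length(event);

		handle_record(data, len, ts);	/* hypothetical */
	}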
/**
* ring_buffer_read_start - start a non consuming read of the buffer
rb_reset_cpu(cpu_buffer);
- spin_unlock_irqrestore(&cpu_buffer->lock, flags);
+ __raw_spin_unlock(&cpu_buffer->lock);
+
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
+ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
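The reset path shows the lock nesting this series establishes: the new reader_lock is the outer, IRQ-disabling lock, and the per-cpu __raw lock (held only against the writer) nests inside it. Condensed from the code above:

	spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	__raw_spin_lock(&cpu_buffer->lock);

	rb_reset_cpu(cpu_buffer);	/* safe to rewrite the page lists */

	__raw_spin_unlock(&cpu_buffer->lock);
	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);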
/**
* ring_buffer_reset - reset a ring buffer
return 0;
}
+ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
+static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
+ struct buffer_data_page *bpage)
+{
+ struct ring_buffer_event *event;
+ unsigned long head;
+
+ __raw_spin_lock(&cpu_buffer->lock);
+ for (head = 0; head < local_read(&bpage->commit);
+ head += rb_event_length(event)) {
+
+ event = __rb_data_page_index(bpage, head);
+ if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
+ return;
+ /* Only count data entries */
+ if (event->type != RINGBUF_TYPE_DATA)
+ continue;
+ cpu_buffer->entries--;
+ }
+ __raw_spin_unlock(&cpu_buffer->lock);
+}
+
+/**
+ * ring_buffer_alloc_read_page - allocate a page to read from buffer
+ * @buffer: the buffer to allocate for.
+ *
+ * This function is used in conjunction with ring_buffer_read_page.
+ * When reading a full page from the ring buffer, these functions
+ * can be used to speed up the process. The calling function should
+ * allocate a few pages first with this function. Then when it
+ * needs to get pages from the ring buffer, it passes the result
+ * of this function into ring_buffer_read_page, which will swap
+ * the page that was allocated with the reader page of the buffer.
+ *
+ * Returns:
+ * The page allocated, or NULL on error.
+ */
+void *ring_buffer_alloc_read_page(struct ring_buffer *buffer)
+{
+ unsigned long addr;
+ struct buffer_data_page *bpage;
+
+ addr = __get_free_page(GFP_KERNEL);
+ if (!addr)
+ return NULL;
+
+ bpage = (void *)addr;
+
+ return bpage;
+}
+
+/**
+ * ring_buffer_free_read_page - free an allocated read page
+ * @buffer: the buffer the page was allocated for
+ * @data: the page to free
+ *
+ * Free a page allocated from ring_buffer_alloc_read_page.
+ */
+void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
+{
+ free_page((unsigned long)data);
+}
+
+/**
+ * ring_buffer_read_page - extract a page from the ring buffer
+ * @buffer: buffer to extract from
+ * @data_page: the page to use, allocated from ring_buffer_alloc_read_page
+ * @cpu: the cpu of the buffer to extract
+ * @full: should the extraction only happen when the page is full.
+ *
+ * This function will pull out a page from the ring buffer and consume it.
+ * @data_page must be the address of the variable that holds the page
+ * returned from ring_buffer_alloc_read_page, because this function may
+ * swap that page with a page already in the ring buffer.
+ *
+ * for example:
+ * rpage = ring_buffer_alloc_read_page(buffer);
+ * if (!rpage)
+ * return error;
+ * ret = ring_buffer_read_page(buffer, &rpage, cpu, 0);
+ * if (ret)
+ * process_page(rpage);
+ *
+ * When @full is set, the function will not return 1 unless the writer
+ * has moved off the reader page.
+ *
+ * Note: it is up to the calling functions to handle sleeps and wakeups.
+ * The ring buffer can be used anywhere in the kernel and cannot
+ * blindly call wake_up. The layer that uses the ring buffer must be
+ * responsible for that.
+ *
+ * Returns:
+ * 1 if data has been transferred
+ * 0 if no data has been transferred.
+ */
+int ring_buffer_read_page(struct ring_buffer *buffer,
+ void **data_page, int cpu, int full)
+{
+ struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+ struct ring_buffer_event *event;
+ struct buffer_data_page *bpage;
+ unsigned long flags;
+ int ret = 0;
+
+ if (!data_page)
+ return 0;
+
+ bpage = *data_page;
+ if (!bpage)
+ return 0;
+
+ spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
+
+ /*
+ * rb_buffer_peek will advance to the next reader page if
+ * the current reader page is empty.
+ */
+ event = rb_buffer_peek(buffer, cpu, NULL);
+ if (!event)
+ goto out;
+
+ /* check for data */
+ if (!local_read(&cpu_buffer->reader_page->page->commit))
+ goto out;
+ /*
+ * If the writer is already off the reader page, then simply
+ * switch the reader page with the given page. Otherwise we
+ * need to copy the data from the reader page into the given page.
+ */
+ if (cpu_buffer->reader_page == cpu_buffer->commit_page) {
+ unsigned int read = cpu_buffer->reader_page->read;
+
+ if (full)
+ goto out;
+ /* The writer is still on the reader page, we must copy */
+ bpage = cpu_buffer->reader_page->page;
+ memcpy(bpage->data,
+ cpu_buffer->reader_page->page->data + read,
+ local_read(&bpage->commit) - read);
+
+ /* consume what was read */
+ cpu_buffer->reader_page->read += read;
+
+ } else {
+ /* swap the pages */
+ rb_init_page(bpage);
+ bpage = cpu_buffer->reader_page->page;
+ cpu_buffer->reader_page->page = *data_page;
+ cpu_buffer->reader_page->read = 0;
+ *data_page = bpage;
+ }
+ ret = 1;
+
+ /* update the entry counter */
+ rb_remove_entries(cpu_buffer, bpage);
+ out:
+ spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
+
+ return ret;
+}
+
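Putting the three page-level calls together, a minimal end-to-end sketch for draining one CPU; process_page() is hypothetical, as in the kernel-doc example above:

	void *rpage = ring_buffer_alloc_read_page(buffer);

	if (!rpage)
		return -ENOMEM;

	/* pass &rpage: on the swap path the function hands back
	 * the old reader page in its place */
	if (ring_buffer_read_page(buffer, &rpage, cpu, 0))
		process_page(rpage);	/* hypothetical consumer */

	ring_buffer_free_read_page(buffer, rpage);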
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
size_t cnt, loff_t *ppos)