/* include/linux/ring_buffer.h */
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _LINUX_RING_BUFFER_H
3 #define _LINUX_RING_BUFFER_H
4
5 #include <linux/mm.h>
6 #include <linux/seq_file.h>
7 #include <linux/poll.h>
8
9 struct trace_buffer;
10 struct ring_buffer_iter;
11
/*
 * Don't refer to this struct directly, use functions below.
 *
 * type_len:   5-bit field holding the event type, or for data events the
 *             payload length encoding (see enum ring_buffer_type).
 * time_delta: 27-bit time delta from the previous event; for TIME_STAMP
 *             events it carries the bottom bits of an absolute timestamp
 *             (see the enum ring_buffer_type kernel-doc).
 * array:      flexible array holding the payload and/or extra
 *             length/timestamp words, as described per type.
 */
struct ring_buffer_event {
	u32		type_len:5, time_delta:27;

	u32		array[];
};
20
/**
 * enum ring_buffer_type - internal ring buffer types
 *
 * @RINGBUF_TYPE_PADDING:	Left over page padding or discarded event
 *				 If time_delta is 0:
 *				  array is ignored
 *				  size is variable depending on how much
 *				  padding is needed
 *				 If time_delta is non zero:
 *				  array[0] holds the actual length
 *				  size = 4 + length (bytes)
 *
 * @RINGBUF_TYPE_TIME_EXTEND:	Extend the time delta
 *				 array[0] = time delta (28 .. 59)
 *				 size = 8 bytes
 *
 * @RINGBUF_TYPE_TIME_STAMP:	Absolute timestamp
 *				 Same format as TIME_EXTEND except that the
 *				 value is an absolute timestamp, not a delta
 *				 event.time_delta contains bottom 27 bits
 *				 array[0] = top (28 .. 59) bits
 *				 size = 8 bytes
 *
 * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
 *				Data record
 *				 If type_len is zero:
 *				  array[0] holds the actual length
 *				  array[1..(length+3)/4] holds data
 *				  size = 4 + length (bytes)
 *				 else
 *				  length = type_len << 2
 *				  array[0..(length+3)/4-1] holds data
 *				  size = 4 + length (bytes)
 */
enum ring_buffer_type {
	RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,	/* type_len 0..28: data event */
	RINGBUF_TYPE_PADDING,			/* 29 */
	RINGBUF_TYPE_TIME_EXTEND,		/* 30 */
	RINGBUF_TYPE_TIME_STAMP,		/* 31: max value of 5-bit type_len */
};
61
/* Length of @event's data payload in bytes (see enum ring_buffer_type). */
unsigned ring_buffer_event_length(struct ring_buffer_event *event);

/* Pointer to @event's data payload. */
void *ring_buffer_event_data(struct ring_buffer_event *event);

/*
 * Timestamp of @event. Takes @buffer as well as the event — presumably
 * needed to resolve the event's 27-bit delta against buffer state
 * (TIME_EXTEND/TIME_STAMP handling); confirm in kernel/trace/ring_buffer.c.
 */
u64 ring_buffer_event_time_stamp(struct trace_buffer *buffer,
				 struct ring_buffer_event *event);

/*
 * ring_buffer_discard_commit will remove an event that has not
 *   been committed yet. If this is used, then ring_buffer_unlock_commit
 *   must not be called on the discarded event. This function
 *   will try to remove the event from the ring buffer completely
 *   if another event has not been written after it.
 *
 * Example use:
 *
 *  if (some_condition)
 *    ring_buffer_discard_commit(buffer, event);
 *  else
 *    ring_buffer_unlock_commit(buffer, event);
 */
void ring_buffer_discard_commit(struct trace_buffer *buffer,
				struct ring_buffer_event *event);
83
/*
 * size is in bytes for each per CPU buffer.
 */
struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);

/*
 * Because the ring buffer is generic, if other users of the ring buffer get
 * traced by ftrace, it can produce lockdep warnings. We need to keep each
 * ring buffer's lock class separate.
 *
 * The statement expression below creates one static lock_class_key per
 * call site of ring_buffer_alloc(), so each allocated buffer gets its own
 * lockdep class.
 */
#define ring_buffer_alloc(size, flags)			\
({							\
	static struct lock_class_key __key;		\
	__ring_buffer_alloc((size), (flags), &__key);	\
})
100
/*
 * Waiting for data: @cpu may be RING_BUFFER_ALL_CPUS. @full presumably
 * selects how full the buffer must be before waking — confirm against
 * kernel/trace/ring_buffer.c.
 */
int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
			  struct file *filp, poll_table *poll_table, int full);
void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu);

/* Pass as @cpu to operate on (or wait for) all per-CPU buffers. */
#define RING_BUFFER_ALL_CPUS -1

void ring_buffer_free(struct trace_buffer *buffer);

/* Resize the per-CPU buffer(s) to @size bytes; @cpu may be RING_BUFFER_ALL_CPUS. */
int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);

/* Toggle overwrite mode at runtime (cf. RB_FL_OVERWRITE alloc flag). */
void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);

/*
 * Two ways to write:
 *  - reserve/commit: reserve @length bytes, fill the returned event, then
 *    ring_buffer_unlock_commit() (or ring_buffer_discard_commit(), see above);
 *  - ring_buffer_write(): copy @length bytes of @data in one call.
 */
struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
						   unsigned long length);
int ring_buffer_unlock_commit(struct trace_buffer *buffer);
int ring_buffer_write(struct trace_buffer *buffer,
		      unsigned long length, void *data);

/*
 * NOTE(review): presumably brackets a region where nested writes to the
 * same buffer are expected — confirm in kernel/trace/ring_buffer.c.
 */
void ring_buffer_nest_start(struct trace_buffer *buffer);
void ring_buffer_nest_end(struct trace_buffer *buffer);
122
/*
 * Reading events directly: peek() looks at the next event without
 * consuming it, consume() removes it. @ts receives the event timestamp,
 * @lost_events (if non-NULL) the number of events lost since the last
 * read — both optional by the look of the pointer parameters; confirm.
 */
struct ring_buffer_event *
ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events);
struct ring_buffer_event *
ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events);

/*
 * Non-consuming iterator: prepare one iterator per CPU, sync once, then
 * start/peek/advance and finally finish to release it.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);

struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_advance(struct ring_buffer_iter *iter);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
bool ring_buffer_iter_dropped(struct ring_buffer_iter *iter);

/* Size in bytes of the buffer for @cpu. */
unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);

/* Clearing buffer contents: one CPU, all online CPUs, or everything. */
void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_reset_online_cpus(struct trace_buffer *buffer);
void ring_buffer_reset(struct trace_buffer *buffer);
148
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
/* Swap the @cpu buffers of @buffer_a and @buffer_b. */
int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
			 struct trace_buffer *buffer_b, int cpu);
#else
/* Swapping disabled at config time: report the operation as unsupported. */
static inline int
ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
		     struct trace_buffer *buffer_b, int cpu)
{
	return -ENODEV;
}
#endif
160
bool ring_buffer_empty(struct trace_buffer *buffer);
bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);

/*
 * Recording control. disable/enable appear paired (likely nesting);
 * off/on plus record_is_on()/record_is_set_on() suggest a separate sticky
 * switch — confirm exact semantics in kernel/trace/ring_buffer.c.
 */
void ring_buffer_record_disable(struct trace_buffer *buffer);
void ring_buffer_record_enable(struct trace_buffer *buffer);
void ring_buffer_record_off(struct trace_buffer *buffer);
void ring_buffer_record_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_on(struct trace_buffer *buffer);
bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);

/* Per-buffer / per-CPU statistics. */
u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_entries(struct trace_buffer *buffer);
unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);

/* Clock handling: current timestamp, normalization, and a pluggable clock. */
u64 ring_buffer_time_stamp(struct trace_buffer *buffer);
void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
				      int cpu, u64 *ts);
void ring_buffer_set_clock(struct trace_buffer *buffer,
			   u64 (*clock)(void));
void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);

size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);

/*
 * Page-granular reading (used by splice-style readers): allocate a page
 * with alloc_read_page(), fill it with read_page(), free it with
 * free_read_page(). The data pointer must round-trip through these.
 */
void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
			  size_t len, int cpu, int full);

struct trace_seq;

/* Describe the event/page binary layout into a trace_seq. */
int ring_buffer_print_entry_header(struct trace_seq *s);
int ring_buffer_print_page_header(struct trace_seq *s);
203
/* Flags for ring_buffer_alloc(). */
enum ring_buffer_flags {
	RB_FL_OVERWRITE		= 1 << 0,	/* overwrite old data when full — cf. ring_buffer_change_overwrite() */
};

/* CPU hotplug prepare callback; stubbed to NULL when the ring buffer is not built. */
#ifdef CONFIG_RING_BUFFER
int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node);
#else
#define trace_rb_cpu_prepare	NULL
#endif
213
214 #endif /* _LINUX_RING_BUFFER_H */