/*
 * lttng-ring-buffer-client.h
 *
 * LTTng lib ring buffer client template.
 *
 * Copyright (C) 2010-2012 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; only
 * version 2.1 of the License.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/types.h>
#include "lib/bitfield.h"
#include "wrapper/vmalloc.h"    /* for wrapper_vmalloc_sync_all() */
#include "wrapper/trace-clock.h"
#include "lttng-events.h"
#include "lttng-tracer.h"
#include "wrapper/ringbuffer/frontend_types.h"

#define LTTNG_COMPACT_EVENT_BITS        5
#define LTTNG_COMPACT_TSC_BITS          27

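/*
 * The compact event header packs the event ID and the low-order TSC bits
 * into a single 32-bit word: 5 bits of event ID plus 27 bits of timestamp.
 * Event ID 31 (all ones in 5 bits) is reserved as the escape value that
 * announces an extended header; the large header reserves ID 65535 the
 * same way. This is why lttng_event_reserve() below switches to
 * LTTNG_RFLAG_EXTENDED for event IDs above 30 (compact) or 65534 (large).
 */
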
/*
 * Keep the natural field alignment for _each field_ within this structure if
 * you ever add/remove a field from this header. Packed attribute is not used
 * because gcc generates poor code on at least powerpc and mips. Don't ever
 * let gcc add padding between the structure elements.
 *
 * The guarantee we have with timestamps is that all the events in a
 * packet are included (inclusive) within the begin/end timestamps of
 * the packet. Another guarantee we have is that the "timestamp begin",
 * as well as the event timestamps, are monotonically increasing (never
 * decrease) when moving forward in a stream (physically). But this
 * guarantee does not apply to "timestamp end", because it is sampled at
 * commit time, which is not ordered with respect to space reservation.
 */

struct packet_header {
        /* Trace packet header */
        uint32_t magic;                 /*
                                         * Trace magic number.
                                         * Contains endianness information.
                                         */
        uint8_t uuid[16];
        uint32_t stream_id;

        struct {
                /* Stream packet context */
                uint64_t timestamp_begin;       /* Cycle count at subbuffer start */
                uint64_t timestamp_end;         /* Cycle count at subbuffer end */
                uint64_t content_size;          /* Size of data in subbuffer */
                uint64_t packet_size;           /* Subbuffer size (includes padding) */
                unsigned long events_discarded; /*
                                                 * Events lost in this subbuffer since
                                                 * the beginning of the trace.
                                                 * (may overflow)
                                                 */
                uint32_t cpu_id;                /* CPU id associated with stream */
                uint8_t header_end;             /* End of header */
        } ctx;
};

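/*
 * Note: content_size and packet_size are expressed in bits (see
 * client_buffer_end() below, which multiplies byte counts by CHAR_BIT),
 * and client_buffer_begin() pre-fills them with ~0 as a debug sentinel
 * until the subbuffer is actually closed.
 */
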
static inline notrace u64 lib_ring_buffer_clock_read(struct channel *chan)
{
        return trace_clock_read64();
}

static inline
size_t ctx_get_size(size_t offset, struct lttng_ctx *ctx)
{
        int i;
        size_t orig_offset = offset;

        if (likely(!ctx))
                return 0;
        for (i = 0; i < ctx->nr_fields; i++)
                offset += ctx->fields[i].get_size(offset);
        return offset - orig_offset;
}

static inline
void ctx_record(struct lib_ring_buffer_ctx *bufctx,
                struct lttng_channel *chan,
                struct lttng_ctx *ctx)
{
        int i;

        if (likely(!ctx))
                return;
        for (i = 0; i < ctx->nr_fields; i++)
                ctx->fields[i].record(&ctx->fields[i], bufctx, chan);
}

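/*
 * ctx_get_size() and ctx_record() must stay consistent: the size accounted
 * for each context field during reservation has to match exactly what that
 * field's record() callback writes later, in the same field order,
 * otherwise the event payload would end up shifted within the reserved
 * slot.
 */
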
/*
 * record_header_size - Calculate the header size and padding necessary.
 * @config: ring buffer instance configuration
 * @chan: channel
 * @offset: offset in the write buffer
 * @pre_header_padding: padding to add before the header (output)
 * @ctx: reservation context
 *
 * Returns the event header size (including padding).
 *
 * The payload must itself determine its own alignment from the biggest type it
 * contains.
 */
static __inline__
size_t record_header_size(const struct lib_ring_buffer_config *config,
                          struct channel *chan, size_t offset,
                          size_t *pre_header_padding,
                          struct lib_ring_buffer_ctx *ctx)
{
        struct lttng_channel *lttng_chan = channel_get_private(chan);
        struct lttng_event *event = ctx->priv;
        size_t orig_offset = offset;
        size_t padding;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                offset += padding;
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        offset += sizeof(uint32_t);     /* id and timestamp */
                } else {
                        /* Minimum space taken by LTTNG_COMPACT_EVENT_BITS id */
                        offset += (LTTNG_COMPACT_EVENT_BITS + CHAR_BIT - 1) / CHAR_BIT;
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        case 2: /* large */
                padding = lib_ring_buffer_align(offset, lttng_alignof(uint16_t));
                offset += padding;
                offset += sizeof(uint16_t);
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint32_t));
                        offset += sizeof(uint32_t);     /* timestamp */
                } else {
                        /* Align extended struct on largest member */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint32_t);     /* id */
                        offset += lib_ring_buffer_align(offset, lttng_alignof(uint64_t));
                        offset += sizeof(uint64_t);     /* timestamp */
                }
                break;
        default:
                padding = 0;
                WARN_ON_ONCE(1);
        }
        offset += ctx_get_size(offset, event->ctx);
        offset += ctx_get_size(offset, lttng_chan->ctx);

        *pre_header_padding = padding;
        return offset - orig_offset;
}
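
/*
 * Example: for a compact header with no extended/full-TSC flags and an
 * offset already aligned on 32 bits, padding is 0 and the header takes
 * exactly sizeof(uint32_t) == 4 bytes (5-bit event ID + 27-bit TSC),
 * plus whatever the attached event/channel context fields require.
 */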

#include "wrapper/ringbuffer/api.h"

static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id);

/*
 * lttng_write_event_header
 *
 * Writes the event header to the offset (already aligned on 32-bits).
 *
 * @config: ring buffer instance configuration
 * @ctx: reservation context
 * @event_id: event ID
 */
static __inline__
void lttng_write_event_header(const struct lib_ring_buffer_config *config,
                            struct lib_ring_buffer_ctx *ctx,
                            uint32_t event_id)
{
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        struct lttng_event *event = ctx->priv;

        if (unlikely(ctx->rflags))
                goto slow_path;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
        {
                uint32_t id_time = 0;

                bt_bitfield_write(&id_time, uint32_t,
                                0,
                                LTTNG_COMPACT_EVENT_BITS,
                                event_id);
                bt_bitfield_write(&id_time, uint32_t,
                                LTTNG_COMPACT_EVENT_BITS,
                                LTTNG_COMPACT_TSC_BITS,
                                ctx->tsc);
                lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                break;
        }
        case 2: /* large */
        {
                uint32_t timestamp = (uint32_t) ctx->tsc;
                uint16_t id = event_id;

                lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
                lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }

        ctx_record(ctx, lttng_chan, lttng_chan->ctx);
        ctx_record(ctx, lttng_chan, event->ctx);
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);

        return;

slow_path:
        lttng_write_event_header_slow(config, ctx, event_id);
}

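/*
 * Slow path: taken whenever rflags is non-zero, i.e. when the 27-bit
 * compact TSC would overflow (RING_BUFFER_RFLAG_FULL_TSC) or when the
 * event ID does not fit in the compact/large ID field
 * (LTTNG_RFLAG_EXTENDED). The reserved IDs 31 (compact) and 65535
 * (large) written below tell the trace reader that the full 32-bit
 * event ID and 64-bit timestamp follow in an extended header.
 */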
static
void lttng_write_event_header_slow(const struct lib_ring_buffer_config *config,
                                 struct lib_ring_buffer_ctx *ctx,
                                 uint32_t event_id)
{
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        struct lttng_event *event = ctx->priv;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t id_time = 0;

                        bt_bitfield_write(&id_time, uint32_t,
                                        0,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        event_id);
                        bt_bitfield_write(&id_time, uint32_t,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        LTTNG_COMPACT_TSC_BITS, ctx->tsc);
                        lib_ring_buffer_write(config, ctx, &id_time, sizeof(id_time));
                } else {
                        uint8_t id = 0;
                        uint64_t timestamp = ctx->tsc;

                        bt_bitfield_write(&id, uint8_t,
                                        0,
                                        LTTNG_COMPACT_EVENT_BITS,
                                        31);
                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        case 2: /* large */
        {
                if (!(ctx->rflags & (RING_BUFFER_RFLAG_FULL_TSC | LTTNG_RFLAG_EXTENDED))) {
                        uint32_t timestamp = (uint32_t) ctx->tsc;
                        uint16_t id = event_id;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint32_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                } else {
                        uint16_t id = 65535;
                        uint64_t timestamp = ctx->tsc;

                        lib_ring_buffer_write(config, ctx, &id, sizeof(id));
                        /* Align extended struct on largest member */
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &event_id, sizeof(event_id));
                        lib_ring_buffer_align_ctx(ctx, lttng_alignof(uint64_t));
                        lib_ring_buffer_write(config, ctx, &timestamp, sizeof(timestamp));
                }
                break;
        }
        default:
                WARN_ON_ONCE(1);
        }
        ctx_record(ctx, lttng_chan, lttng_chan->ctx);
        ctx_record(ctx, lttng_chan, event->ctx);
        lib_ring_buffer_align_ctx(ctx, ctx->largest_align);
}

static const struct lib_ring_buffer_config client_config;

static u64 client_ring_buffer_clock_read(struct channel *chan)
{
        return lib_ring_buffer_clock_read(chan);
}

static
size_t client_record_header_size(const struct lib_ring_buffer_config *config,
                                 struct channel *chan, size_t offset,
                                 size_t *pre_header_padding,
                                 struct lib_ring_buffer_ctx *ctx)
{
        return record_header_size(config, chan, offset,
                                  pre_header_padding, ctx);
}

/**
 * client_packet_header_size - called on buffer-switch to a new sub-buffer
 *
 * Return header size without padding after the structure. Don't use packed
 * structure because gcc generates inefficient code on some architectures
 * (powerpc, mips..)
 */
static size_t client_packet_header_size(void)
{
        return offsetof(struct packet_header, ctx.header_end);
}

static void client_buffer_begin(struct lib_ring_buffer *buf, u64 tsc,
                                unsigned int subbuf_idx)
{
        struct channel *chan = buf->backend.chan;
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size);
        struct lttng_channel *lttng_chan = channel_get_private(chan);
        struct lttng_session *session = lttng_chan->session;

        header->magic = CTF_MAGIC_NUMBER;
        memcpy(header->uuid, session->uuid.b, sizeof(session->uuid));
        header->stream_id = lttng_chan->id;
        header->ctx.timestamp_begin = tsc;
        header->ctx.timestamp_end = 0;
        header->ctx.content_size = ~0ULL; /* for debugging */
        header->ctx.packet_size = ~0ULL;
        header->ctx.events_discarded = 0;
        header->ctx.cpu_id = buf->backend.cpu;
}

/*
 * offset is assumed to never be 0 here: never deliver a completely empty
 * subbuffer. data_size is between 1 and subbuf_size.
 */
static void client_buffer_end(struct lib_ring_buffer *buf, u64 tsc,
                              unsigned int subbuf_idx, unsigned long data_size)
{
        struct channel *chan = buf->backend.chan;
        struct packet_header *header =
                (struct packet_header *)
                        lib_ring_buffer_offset_address(&buf->backend,
                                subbuf_idx * chan->backend.subbuf_size);
        unsigned long records_lost = 0;

        header->ctx.timestamp_end = tsc;
        header->ctx.content_size =
                (uint64_t) data_size * CHAR_BIT;                /* in bits */
        header->ctx.packet_size =
                (uint64_t) PAGE_ALIGN(data_size) * CHAR_BIT;    /* in bits */
        records_lost += lib_ring_buffer_get_records_lost_full(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_wrap(&client_config, buf);
        records_lost += lib_ring_buffer_get_records_lost_big(&client_config, buf);
        header->ctx.events_discarded = records_lost;
}

static int client_buffer_create(struct lib_ring_buffer *buf, void *priv,
                                int cpu, const char *name)
{
        return 0;
}

static void client_buffer_finalize(struct lib_ring_buffer *buf, void *priv, int cpu)
{
}

static const struct lib_ring_buffer_config client_config = {
        .cb.ring_buffer_clock_read = client_ring_buffer_clock_read,
        .cb.record_header_size = client_record_header_size,
        .cb.subbuffer_header_size = client_packet_header_size,
        .cb.buffer_begin = client_buffer_begin,
        .cb.buffer_end = client_buffer_end,
        .cb.buffer_create = client_buffer_create,
        .cb.buffer_finalize = client_buffer_finalize,

        .tsc_bits = LTTNG_COMPACT_TSC_BITS,
        .alloc = RING_BUFFER_ALLOC_PER_CPU,
        .sync = RING_BUFFER_SYNC_PER_CPU,
        .mode = RING_BUFFER_MODE_TEMPLATE,
        .backend = RING_BUFFER_PAGE,
        .output = RING_BUFFER_OUTPUT_TEMPLATE,
        .oops = RING_BUFFER_OOPS_CONSISTENCY,
        .ipi = RING_BUFFER_IPI_BARRIER,
        .wakeup = RING_BUFFER_WAKEUP_BY_TIMER,
};
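
/*
 * tsc_bits is set to LTTNG_COMPACT_TSC_BITS so the lib ring buffer flags
 * a reservation with RING_BUFFER_RFLAG_FULL_TSC whenever the 27-bit
 * timestamp field would overflow, which sends header writes through the
 * extended slow path above.
 */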

static
struct channel *_channel_create(const char *name,
                                struct lttng_channel *lttng_chan, void *buf_addr,
                                size_t subbuf_size, size_t num_subbuf,
                                unsigned int switch_timer_interval,
                                unsigned int read_timer_interval)
{
        return channel_create(&client_config, name, lttng_chan, buf_addr,
                              subbuf_size, num_subbuf, switch_timer_interval,
                              read_timer_interval);
}

static
void lttng_channel_destroy(struct channel *chan)
{
        channel_destroy(chan);
}

static
struct lib_ring_buffer *lttng_buffer_read_open(struct channel *chan)
{
        struct lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                buf = channel_get_ring_buffer(&client_config, chan, cpu);
                if (!lib_ring_buffer_open_read(buf))
                        return buf;
        }
        return NULL;
}

static
int lttng_buffer_has_read_closed_stream(struct channel *chan)
{
        struct lib_ring_buffer *buf;
        int cpu;

        for_each_channel_cpu(cpu, chan) {
                buf = channel_get_ring_buffer(&client_config, chan, cpu);
                if (!atomic_long_read(&buf->active_readers))
                        return 1;
        }
        return 0;
}

static
void lttng_buffer_read_close(struct lib_ring_buffer *buf)
{
        lib_ring_buffer_release_read(buf);
}

static
int lttng_event_reserve(struct lib_ring_buffer_ctx *ctx,
                      uint32_t event_id)
{
        struct lttng_channel *lttng_chan = channel_get_private(ctx->chan);
        int ret, cpu;

        cpu = lib_ring_buffer_get_cpu(&client_config);
        if (cpu < 0)
                return -EPERM;
        ctx->cpu = cpu;

        switch (lttng_chan->header_type) {
        case 1: /* compact */
                if (event_id > 30)
                        ctx->rflags |= LTTNG_RFLAG_EXTENDED;
                break;
        case 2: /* large */
                if (event_id > 65534)
                        ctx->rflags |= LTTNG_RFLAG_EXTENDED;
                break;
        default:
                WARN_ON_ONCE(1);
        }

        ret = lib_ring_buffer_reserve(&client_config, ctx);
        if (ret)
                goto put;
        lttng_write_event_header(&client_config, ctx, event_id);
        return 0;
put:
        lib_ring_buffer_put_cpu(&client_config);
        return ret;
}
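
/*
 * Typical probe-side usage of the reserve/commit pair exposed through
 * lttng_relay_transport.ops below (a sketch, assuming a probe that has
 * already computed its payload size and alignment; names such as
 * "payload" and "payload_size" are illustrative, not part of this file):
 *
 *      struct lib_ring_buffer_ctx ctx;
 *      int ret;
 *
 *      lib_ring_buffer_ctx_init(&ctx, chan->chan, event, payload_size,
 *                               payload_align, -1);
 *      ret = chan->ops->event_reserve(&ctx, event->id);
 *      if (ret < 0)
 *              return;
 *      chan->ops->event_write(&ctx, payload, payload_size);
 *      chan->ops->event_commit(&ctx);
 */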

static
void lttng_event_commit(struct lib_ring_buffer_ctx *ctx)
{
        lib_ring_buffer_commit(&client_config, ctx);
        lib_ring_buffer_put_cpu(&client_config);
}

static
void lttng_event_write(struct lib_ring_buffer_ctx *ctx, const void *src,
                     size_t len)
{
        lib_ring_buffer_write(&client_config, ctx, src, len);
}

static
void lttng_event_write_from_user(struct lib_ring_buffer_ctx *ctx,
                               const void __user *src, size_t len)
{
        lib_ring_buffer_copy_from_user_inatomic(&client_config, ctx, src, len);
}

static
void lttng_event_memset(struct lib_ring_buffer_ctx *ctx,
                int c, size_t len)
{
        lib_ring_buffer_memset(&client_config, ctx, c, len);
}

static
wait_queue_head_t *lttng_get_writer_buf_wait_queue(struct channel *chan, int cpu)
{
        struct lib_ring_buffer *buf = channel_get_ring_buffer(&client_config,
                                        chan, cpu);
        return &buf->write_wait;
}

static
wait_queue_head_t *lttng_get_hp_wait_queue(struct channel *chan)
{
        return &chan->hp_wait;
}

static
int lttng_is_finalized(struct channel *chan)
{
        return lib_ring_buffer_channel_is_finalized(chan);
}

static
int lttng_is_disabled(struct channel *chan)
{
        return lib_ring_buffer_channel_is_disabled(chan);
}

static struct lttng_transport lttng_relay_transport = {
        .name = "relay-" RING_BUFFER_MODE_TEMPLATE_STRING,
        .owner = THIS_MODULE,
        .ops = {
                .channel_create = _channel_create,
                .channel_destroy = lttng_channel_destroy,
                .buffer_read_open = lttng_buffer_read_open,
                .buffer_has_read_closed_stream =
                        lttng_buffer_has_read_closed_stream,
                .buffer_read_close = lttng_buffer_read_close,
                .event_reserve = lttng_event_reserve,
                .event_commit = lttng_event_commit,
                .event_write = lttng_event_write,
                .event_write_from_user = lttng_event_write_from_user,
                .event_memset = lttng_event_memset,
                .packet_avail_size = NULL,      /* Would be racy anyway */
                .get_writer_buf_wait_queue = lttng_get_writer_buf_wait_queue,
                .get_hp_wait_queue = lttng_get_hp_wait_queue,
                .is_finalized = lttng_is_finalized,
                .is_disabled = lttng_is_disabled,
        },
};

static int __init lttng_ring_buffer_client_init(void)
{
        /*
         * This vmalloc sync all also takes care of the lib ring buffer
         * vmalloc'd module pages when it is built as a module into LTTng.
         */
        wrapper_vmalloc_sync_all();
        lttng_transport_register(&lttng_relay_transport);
        return 0;
}

module_init(lttng_ring_buffer_client_init);

static void __exit lttng_ring_buffer_client_exit(void)
{
        lttng_transport_unregister(&lttng_relay_transport);
}

module_exit(lttng_ring_buffer_client_exit);

MODULE_LICENSE("GPL and additional rights");
MODULE_AUTHOR("Mathieu Desnoyers");
MODULE_DESCRIPTION("LTTng ring buffer " RING_BUFFER_MODE_TEMPLATE_STRING
                   " client");