1 // SPDX-License-Identifier: GPL-2.0
3 * trace_events_synth - synthetic trace events
5 * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
20 #include "trace_probe.h"
21 #include "trace_probe_kernel.h"
23 #include "trace_synth.h"
27 C(BAD_NAME, "Illegal name"), \
28 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
29 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
30 C(EVENT_EXISTS, "Event already exists"), \
31 C(TOO_MANY_FIELDS, "Too many fields"), \
32 C(INCOMPLETE_TYPE, "Incomplete type"), \
33 C(INVALID_TYPE, "Invalid type"), \
34 C(INVALID_FIELD, "Invalid field"), \
35 C(INVALID_ARRAY_SPEC, "Invalid array specification"),
38 #define C(a, b) SYNTH_ERR_##a
45 static const char *err_text[] = { ERRORS };
47 static DEFINE_MUTEX(lastcmd_mutex);
48 static char *last_cmd;
50 static int errpos(const char *str)
54 mutex_lock(&lastcmd_mutex);
55 if (!str || !last_cmd)
58 ret = err_pos(last_cmd, str);
60 mutex_unlock(&lastcmd_mutex);
64 static void last_cmd_set(const char *str)
69 mutex_lock(&lastcmd_mutex);
71 last_cmd = kstrdup(str, GFP_KERNEL);
72 mutex_unlock(&lastcmd_mutex);
75 static void synth_err(u8 err_type, u16 err_pos)
77 mutex_lock(&lastcmd_mutex);
81 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
84 mutex_unlock(&lastcmd_mutex);
87 static int create_synth_event(const char *raw_command);
88 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
89 static int synth_event_release(struct dyn_event *ev);
90 static bool synth_event_is_busy(struct dyn_event *ev);
91 static bool synth_event_match(const char *system, const char *event,
92 int argc, const char **argv, struct dyn_event *ev);
94 static struct dyn_event_operations synth_event_ops = {
95 .create = create_synth_event,
96 .show = synth_event_show,
97 .is_busy = synth_event_is_busy,
98 .free = synth_event_release,
99 .match = synth_event_match,
102 static bool is_synth_event(struct dyn_event *ev)
104 return ev->ops == &synth_event_ops;
107 static struct synth_event *to_synth_event(struct dyn_event *ev)
109 return container_of(ev, struct synth_event, devent);
112 static bool synth_event_is_busy(struct dyn_event *ev)
114 struct synth_event *event = to_synth_event(ev);
116 return event->ref != 0;
119 static bool synth_event_match(const char *system, const char *event,
120 int argc, const char **argv, struct dyn_event *ev)
122 struct synth_event *sev = to_synth_event(ev);
124 return strcmp(sev->name, event) == 0 &&
125 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
128 struct synth_trace_event {
129 struct trace_entry ent;
133 static int synth_event_define_fields(struct trace_event_call *call)
135 struct synth_trace_event trace;
136 int offset = offsetof(typeof(trace), fields);
137 struct synth_event *event = call->data;
138 unsigned int i, size, n_u64;
143 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
144 size = event->fields[i]->size;
145 is_signed = event->fields[i]->is_signed;
146 type = event->fields[i]->type;
147 name = event->fields[i]->name;
148 ret = trace_define_field(call, type, name, offset, size,
149 is_signed, FILTER_OTHER);
153 event->fields[i]->offset = n_u64;
155 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
156 offset += STR_VAR_LEN_MAX;
157 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
159 offset += sizeof(u64);
164 event->n_u64 = n_u64;
169 static bool synth_field_signed(char *type)
171 if (str_has_prefix(type, "u"))
173 if (strcmp(type, "gfp_t") == 0)
179 static int synth_field_is_string(char *type)
181 if (strstr(type, "char[") != NULL)
187 static int synth_field_is_stack(char *type)
189 if (strstr(type, "long[") != NULL)
195 static int synth_field_string_size(char *type)
197 char buf[4], *end, *start;
201 start = strstr(type, "char[");
204 start += sizeof("char[") - 1;
206 end = strchr(type, ']');
207 if (!end || end < start || type + strlen(type) > end + 1)
215 return 0; /* variable-length string */
217 strncpy(buf, start, len);
220 err = kstrtouint(buf, 0, &size);
224 if (size > STR_VAR_LEN_MAX)
230 static int synth_field_size(char *type)
234 if (strcmp(type, "s64") == 0)
236 else if (strcmp(type, "u64") == 0)
238 else if (strcmp(type, "s32") == 0)
240 else if (strcmp(type, "u32") == 0)
242 else if (strcmp(type, "s16") == 0)
244 else if (strcmp(type, "u16") == 0)
246 else if (strcmp(type, "s8") == 0)
248 else if (strcmp(type, "u8") == 0)
250 else if (strcmp(type, "char") == 0)
252 else if (strcmp(type, "unsigned char") == 0)
253 size = sizeof(unsigned char);
254 else if (strcmp(type, "int") == 0)
256 else if (strcmp(type, "unsigned int") == 0)
257 size = sizeof(unsigned int);
258 else if (strcmp(type, "long") == 0)
260 else if (strcmp(type, "unsigned long") == 0)
261 size = sizeof(unsigned long);
262 else if (strcmp(type, "bool") == 0)
264 else if (strcmp(type, "pid_t") == 0)
265 size = sizeof(pid_t);
266 else if (strcmp(type, "gfp_t") == 0)
267 size = sizeof(gfp_t);
268 else if (synth_field_is_string(type))
269 size = synth_field_string_size(type);
270 else if (synth_field_is_stack(type))
276 static const char *synth_field_fmt(char *type)
278 const char *fmt = "%llu";
280 if (strcmp(type, "s64") == 0)
282 else if (strcmp(type, "u64") == 0)
284 else if (strcmp(type, "s32") == 0)
286 else if (strcmp(type, "u32") == 0)
288 else if (strcmp(type, "s16") == 0)
290 else if (strcmp(type, "u16") == 0)
292 else if (strcmp(type, "s8") == 0)
294 else if (strcmp(type, "u8") == 0)
296 else if (strcmp(type, "char") == 0)
298 else if (strcmp(type, "unsigned char") == 0)
300 else if (strcmp(type, "int") == 0)
302 else if (strcmp(type, "unsigned int") == 0)
304 else if (strcmp(type, "long") == 0)
306 else if (strcmp(type, "unsigned long") == 0)
308 else if (strcmp(type, "bool") == 0)
310 else if (strcmp(type, "pid_t") == 0)
312 else if (strcmp(type, "gfp_t") == 0)
314 else if (synth_field_is_string(type))
316 else if (synth_field_is_stack(type))
322 static void print_synth_event_num_val(struct trace_seq *s,
323 char *print_fmt, char *name,
324 int size, u64 val, char *space)
328 trace_seq_printf(s, print_fmt, name, (u8)val, space);
332 trace_seq_printf(s, print_fmt, name, (u16)val, space);
336 trace_seq_printf(s, print_fmt, name, (u32)val, space);
340 trace_seq_printf(s, print_fmt, name, val, space);
345 static enum print_line_t print_synth_event(struct trace_iterator *iter,
347 struct trace_event *event)
349 struct trace_array *tr = iter->tr;
350 struct trace_seq *s = &iter->seq;
351 struct synth_trace_event *entry;
352 struct synth_event *se;
353 unsigned int i, n_u64;
357 entry = (struct synth_trace_event *)iter->ent;
358 se = container_of(event, struct synth_event, call.event);
360 trace_seq_printf(s, "%s: ", se->name);
362 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
363 if (trace_seq_has_overflowed(s))
366 fmt = synth_field_fmt(se->fields[i]->type);
368 /* parameter types */
369 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
370 trace_seq_printf(s, "%s ", fmt);
372 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
374 /* parameter values */
375 if (se->fields[i]->is_string) {
376 if (se->fields[i]->is_dynamic) {
377 u32 offset, data_offset;
380 offset = (u32)entry->fields[n_u64];
381 data_offset = offset & 0xffff;
383 str_field = (char *)entry + data_offset;
385 trace_seq_printf(s, print_fmt, se->fields[i]->name,
388 i == se->n_fields - 1 ? "" : " ");
391 trace_seq_printf(s, print_fmt, se->fields[i]->name,
393 (char *)&entry->fields[n_u64],
394 i == se->n_fields - 1 ? "" : " ");
395 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
397 } else if (se->fields[i]->is_stack) {
398 u32 offset, data_offset, len;
399 unsigned long *p, *end;
401 offset = (u32)entry->fields[n_u64];
402 data_offset = offset & 0xffff;
405 p = (void *)entry + data_offset;
406 end = (void *)p + len - (sizeof(long) - 1);
408 trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name);
410 for (; *p && p < end; p++)
411 trace_seq_printf(s, "=> %pS\n", (void *)*p);
415 struct trace_print_flags __flags[] = {
416 __def_gfpflag_names, {-1, NULL} };
417 char *space = (i == se->n_fields - 1 ? "" : " ");
419 print_synth_event_num_val(s, print_fmt,
422 entry->fields[n_u64],
425 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
426 trace_seq_puts(s, " (");
427 trace_print_flags_seq(s, "|",
428 entry->fields[n_u64],
430 trace_seq_putc(s, ')');
436 trace_seq_putc(s, '\n');
438 return trace_handle_return(s);
441 static struct trace_event_functions synth_event_funcs = {
442 .trace = print_synth_event
445 static unsigned int trace_string(struct synth_trace_event *entry,
446 struct synth_event *event,
449 unsigned int data_size,
452 unsigned int len = 0;
459 data_offset = struct_size(entry, fields, event->n_u64);
460 data_offset += data_size;
462 len = fetch_store_strlen((unsigned long)str_val);
464 data_offset |= len << 16;
465 *(u32 *)&entry->fields[*n_u64] = data_offset;
467 ret = fetch_store_string((unsigned long)str_val, &entry->fields[*n_u64], entry);
471 str_field = (char *)&entry->fields[*n_u64];
473 #ifdef CONFIG_ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
474 if ((unsigned long)str_val < TASK_SIZE)
475 ret = strncpy_from_user_nofault(str_field, str_val, STR_VAR_LEN_MAX);
478 ret = strncpy_from_kernel_nofault(str_field, str_val, STR_VAR_LEN_MAX);
481 strcpy(str_field, FAULT_STRING);
483 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
489 static unsigned int trace_stack(struct synth_trace_event *entry,
490 struct synth_event *event,
492 unsigned int data_size,
499 data_offset = struct_size(entry, fields, event->n_u64);
500 data_offset += data_size;
502 for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) {
507 /* Include the zero'd element if it fits */
508 if (len < HIST_STACKTRACE_DEPTH)
513 /* Find the dynamic section to copy the stack into. */
514 data_loc = (void *)entry + data_offset;
515 memcpy(data_loc, stack, len);
517 /* Fill in the field that holds the offset/len combo */
518 data_offset |= len << 16;
519 *(u32 *)&entry->fields[*n_u64] = data_offset;
526 static notrace void trace_event_raw_event_synth(void *__data,
528 unsigned int *var_ref_idx)
530 unsigned int i, n_u64, val_idx, len, data_size = 0;
531 struct trace_event_file *trace_file = __data;
532 struct synth_trace_event *entry;
533 struct trace_event_buffer fbuffer;
534 struct trace_buffer *buffer;
535 struct synth_event *event;
538 event = trace_file->event_call->data;
540 if (trace_trigger_soft_disabled(trace_file))
543 fields_size = event->n_u64 * sizeof(u64);
545 for (i = 0; i < event->n_dynamic_fields; i++) {
546 unsigned int field_pos = event->dynamic_fields[i]->field_pos;
549 val_idx = var_ref_idx[field_pos];
550 str_val = (char *)(long)var_ref_vals[val_idx];
552 if (event->dynamic_fields[i]->is_stack) {
553 len = *((unsigned long *)str_val);
554 len *= sizeof(unsigned long);
556 len = fetch_store_strlen((unsigned long)str_val);
563 * Avoid ring buffer recursion detection, as this event
564 * is being performed within another event.
566 buffer = trace_file->tr->array_buffer.buffer;
567 ring_buffer_nest_start(buffer);
569 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
570 sizeof(*entry) + fields_size);
574 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
575 val_idx = var_ref_idx[i];
576 if (event->fields[i]->is_string) {
577 char *str_val = (char *)(long)var_ref_vals[val_idx];
579 len = trace_string(entry, event, str_val,
580 event->fields[i]->is_dynamic,
582 data_size += len; /* only dynamic string increments */
583 } else if (event->fields[i]->is_stack) {
584 long *stack = (long *)(long)var_ref_vals[val_idx];
586 len = trace_stack(entry, event, stack,
590 struct synth_field *field = event->fields[i];
591 u64 val = var_ref_vals[val_idx];
593 switch (field->size) {
595 *(u8 *)&entry->fields[n_u64] = (u8)val;
599 *(u16 *)&entry->fields[n_u64] = (u16)val;
603 *(u32 *)&entry->fields[n_u64] = (u32)val;
607 entry->fields[n_u64] = val;
614 trace_event_buffer_commit(&fbuffer);
616 ring_buffer_nest_end(buffer);
619 static void free_synth_event_print_fmt(struct trace_event_call *call)
622 kfree(call->print_fmt);
623 call->print_fmt = NULL;
627 static int __set_synth_event_print_fmt(struct synth_event *event,
634 /* When len=0, we just calculate the needed length */
635 #define LEN_OR_ZERO (len ? len - pos : 0)
637 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
638 for (i = 0; i < event->n_fields; i++) {
639 fmt = synth_field_fmt(event->fields[i]->type);
640 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
641 event->fields[i]->name, fmt,
642 i == event->n_fields - 1 ? "" : ", ");
644 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
646 for (i = 0; i < event->n_fields; i++) {
647 if (event->fields[i]->is_string &&
648 event->fields[i]->is_dynamic)
649 pos += snprintf(buf + pos, LEN_OR_ZERO,
650 ", __get_str(%s)", event->fields[i]->name);
651 else if (event->fields[i]->is_stack)
652 pos += snprintf(buf + pos, LEN_OR_ZERO,
653 ", __get_stacktrace(%s)", event->fields[i]->name);
655 pos += snprintf(buf + pos, LEN_OR_ZERO,
656 ", REC->%s", event->fields[i]->name);
661 /* return the length of print_fmt */
665 static int set_synth_event_print_fmt(struct trace_event_call *call)
667 struct synth_event *event = call->data;
671 /* First: called with 0 length to calculate the needed length */
672 len = __set_synth_event_print_fmt(event, NULL, 0);
674 print_fmt = kmalloc(len + 1, GFP_KERNEL);
678 /* Second: actually write the @print_fmt */
679 __set_synth_event_print_fmt(event, print_fmt, len + 1);
680 call->print_fmt = print_fmt;
685 static void free_synth_field(struct synth_field *field)
692 static int check_field_version(const char *prefix, const char *field_type,
693 const char *field_name)
696 * For backward compatibility, the old synthetic event command
697 * format did not require semicolons, and in order to not
698 * break user space, that old format must still work. If a new
699 * feature is added, then the format that uses the new feature
700 * will be required to have semicolons, as nothing that uses
701 * the old format would be using the new, yet to be created,
702 * feature. When a new feature is added, this will detect it,
 * and return a number greater than 1, and require the format
 * to contain semicolons.
709 static struct synth_field *parse_synth_field(int argc, char **argv,
710 int *consumed, int *field_version)
712 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
713 struct synth_field *field;
714 int len, ret = -ENOMEM;
718 if (!strcmp(field_type, "unsigned")) {
720 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
721 return ERR_PTR(-EINVAL);
723 prefix = "unsigned ";
724 field_type = argv[1];
725 field_name = argv[2];
728 field_name = argv[1];
733 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
734 return ERR_PTR(-EINVAL);
737 *field_version = check_field_version(prefix, field_type, field_name);
739 field = kzalloc(sizeof(*field), GFP_KERNEL);
741 return ERR_PTR(-ENOMEM);
743 len = strlen(field_name);
744 array = strchr(field_name, '[');
746 len -= strlen(array);
748 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
752 if (!is_good_name(field->name)) {
753 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
758 len = strlen(field_type) + 1;
761 len += strlen(array);
764 len += strlen(prefix);
766 field->type = kzalloc(len, GFP_KERNEL);
770 seq_buf_init(&s, field->type, len);
772 seq_buf_puts(&s, prefix);
773 seq_buf_puts(&s, field_type);
775 seq_buf_puts(&s, array);
776 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
779 s.buffer[s.len] = '\0';
781 size = synth_field_size(field->type);
784 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
786 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
789 } else if (size == 0) {
790 if (synth_field_is_string(field->type) ||
791 synth_field_is_stack(field->type)) {
794 len = sizeof("__data_loc ") + strlen(field->type) + 1;
795 type = kzalloc(len, GFP_KERNEL);
799 seq_buf_init(&s, type, len);
800 seq_buf_puts(&s, "__data_loc ");
801 seq_buf_puts(&s, field->type);
803 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
805 s.buffer[s.len] = '\0';
810 field->is_dynamic = true;
813 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
820 if (synth_field_is_string(field->type))
821 field->is_string = true;
822 else if (synth_field_is_stack(field->type))
823 field->is_stack = true;
825 field->is_signed = synth_field_signed(field->type);
829 free_synth_field(field);
830 field = ERR_PTR(ret);
834 static void free_synth_tracepoint(struct tracepoint *tp)
843 static struct tracepoint *alloc_synth_tracepoint(char *name)
845 struct tracepoint *tp;
847 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
849 return ERR_PTR(-ENOMEM);
851 tp->name = kstrdup(name, GFP_KERNEL);
854 return ERR_PTR(-ENOMEM);
860 struct synth_event *find_synth_event(const char *name)
862 struct dyn_event *pos;
863 struct synth_event *event;
865 for_each_dyn_event(pos) {
866 if (!is_synth_event(pos))
868 event = to_synth_event(pos);
869 if (strcmp(event->name, name) == 0)
876 static struct trace_event_fields synth_event_fields_array[] = {
877 { .type = TRACE_FUNCTION_TYPE,
878 .define_fields = synth_event_define_fields },
882 static int register_synth_event(struct synth_event *event)
884 struct trace_event_call *call = &event->call;
887 event->call.class = &event->class;
888 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
889 if (!event->class.system) {
894 event->tp = alloc_synth_tracepoint(event->name);
895 if (IS_ERR(event->tp)) {
896 ret = PTR_ERR(event->tp);
901 INIT_LIST_HEAD(&call->class->fields);
902 call->event.funcs = &synth_event_funcs;
903 call->class->fields_array = synth_event_fields_array;
905 ret = register_trace_event(&call->event);
910 call->flags = TRACE_EVENT_FL_TRACEPOINT;
911 call->class->reg = trace_event_reg;
912 call->class->probe = trace_event_raw_event_synth;
914 call->tp = event->tp;
916 ret = trace_add_event_call(call);
918 pr_warn("Failed to register synthetic event: %s\n",
919 trace_event_name(call));
923 ret = set_synth_event_print_fmt(call);
924 /* unregister_trace_event() will be called inside */
926 trace_remove_event_call(call);
930 unregister_trace_event(&call->event);
934 static int unregister_synth_event(struct synth_event *event)
936 struct trace_event_call *call = &event->call;
939 ret = trace_remove_event_call(call);
944 static void free_synth_event(struct synth_event *event)
951 for (i = 0; i < event->n_fields; i++)
952 free_synth_field(event->fields[i]);
954 kfree(event->fields);
955 kfree(event->dynamic_fields);
957 kfree(event->class.system);
958 free_synth_tracepoint(event->tp);
959 free_synth_event_print_fmt(&event->call);
963 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
964 struct synth_field **fields)
966 unsigned int i, j, n_dynamic_fields = 0;
967 struct synth_event *event;
969 event = kzalloc(sizeof(*event), GFP_KERNEL);
971 event = ERR_PTR(-ENOMEM);
975 event->name = kstrdup(name, GFP_KERNEL);
978 event = ERR_PTR(-ENOMEM);
982 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
983 if (!event->fields) {
984 free_synth_event(event);
985 event = ERR_PTR(-ENOMEM);
989 for (i = 0; i < n_fields; i++)
990 if (fields[i]->is_dynamic)
993 if (n_dynamic_fields) {
994 event->dynamic_fields = kcalloc(n_dynamic_fields,
995 sizeof(*event->dynamic_fields),
997 if (!event->dynamic_fields) {
998 free_synth_event(event);
999 event = ERR_PTR(-ENOMEM);
1004 dyn_event_init(&event->devent, &synth_event_ops);
1006 for (i = 0, j = 0; i < n_fields; i++) {
1007 fields[i]->field_pos = i;
1008 event->fields[i] = fields[i];
1010 if (fields[i]->is_dynamic)
1011 event->dynamic_fields[j++] = fields[i];
1013 event->n_dynamic_fields = j;
1014 event->n_fields = n_fields;
1019 static int synth_event_check_arg_fn(void *data)
1021 struct dynevent_arg_pair *arg_pair = data;
1024 size = synth_field_size((char *)arg_pair->lhs);
1026 if (strstr((char *)arg_pair->lhs, "["))
1030 return size ? 0 : -EINVAL;
1034 * synth_event_add_field - Add a new field to a synthetic event cmd
1035 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1036 * @type: The type of the new field to add
1037 * @name: The name of the new field to add
1039 * Add a new field to a synthetic event cmd object. Field ordering is in
1040 * the same order the fields are added.
1042 * See synth_field_size() for available types. If field_name contains
1043 * [n] the field is considered to be an array.
1045 * Return: 0 if successful, error otherwise.
1047 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
1050 struct dynevent_arg_pair arg_pair;
1053 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1059 dynevent_arg_pair_init(&arg_pair, 0, ';');
1061 arg_pair.lhs = type;
1062 arg_pair.rhs = name;
1064 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
1068 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1073 EXPORT_SYMBOL_GPL(synth_event_add_field);
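/*
 * Usage sketch (illustrative only, not compiled here): fields can be added
 * one at a time to a command started with synth_event_gen_cmd_start().
 * The event name "wake_lat" and the field names below are hypothetical.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "wake_lat", THIS_MODULE);
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */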
1076 * synth_event_add_field_str - Add a new field to a synthetic event cmd
1077 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1078 * @type_name: The type and name of the new field to add, as a single string
1080 * Add a new field to a synthetic event cmd object, as a single
1081 * string. The @type_name string is expected to be of the form 'type
 * name', to which a ';' will be appended.  No sanity checking is done -
 * what's passed in is assumed to already be well-formed.  Field
 * ordering is in the same order the fields are added.
1086 * See synth_field_size() for available types. If field_name contains
1087 * [n] the field is considered to be an array.
1089 * Return: 0 if successful, error otherwise.
1091 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
1093 struct dynevent_arg arg;
1096 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1102 dynevent_arg_init(&arg, ';');
1104 arg.str = type_name;
1106 ret = dynevent_arg_add(cmd, &arg, NULL);
1110 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1115 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
1118 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1119 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1120 * @fields: An array of type/name field descriptions
1121 * @n_fields: The number of field descriptions contained in the fields array
1123 * Add a new set of fields to a synthetic event cmd object. The event
1124 * fields that will be defined for the event should be passed in as an
1125 * array of struct synth_field_desc, and the number of elements in the
1126 * array passed in as n_fields. Field ordering will retain the
1127 * ordering given in the fields array.
1129 * See synth_field_size() for available types. If field_name contains
1130 * [n] the field is considered to be an array.
1132 * Return: 0 if successful, error otherwise.
1134 int synth_event_add_fields(struct dynevent_cmd *cmd,
1135 struct synth_field_desc *fields,
1136 unsigned int n_fields)
1141 for (i = 0; i < n_fields; i++) {
1142 if (fields[i].type == NULL || fields[i].name == NULL) {
1147 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1154 EXPORT_SYMBOL_GPL(synth_event_add_fields);
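/*
 * Usage sketch (illustrative): a block of fields described by a
 * synth_field_desc array can be appended to a command that was started as
 * above; the field descriptions here are hypothetical.
 *
 *	static struct synth_field_desc extra_fields[] = {
 *		{ .type = "char[16]",	.name = "comm" },
 *		{ .type = "u64",	.name = "ts_ns" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, extra_fields,
 *				     ARRAY_SIZE(extra_fields));
 */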
1157 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1158 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1159 * @name: The name of the synthetic event
1160 * @mod: The module creating the event, NULL if not created from a module
1161 * @args: Variable number of arg (pairs), one pair for each field
1163 * NOTE: Users normally won't want to call this function directly, but
1164 * rather use the synth_event_gen_cmd_start() wrapper, which
1165 * automatically adds a NULL to the end of the arg list. If this
 * function is used directly, make sure the last arg in the variable
 * arg list is NULL.
 *
1169 * Generate a synthetic event command to be executed by
1170 * synth_event_gen_cmd_end(). This function can be used to generate
1171 * the complete command or only the first part of it; in the latter
1172 * case, synth_event_add_field(), synth_event_add_field_str(), or
1173 * synth_event_add_fields() can be used to add more fields following
 * There should be an even number of variable args, each pair consisting
 * of a type followed by a field name.
1179 * See synth_field_size() for available types. If field_name contains
1180 * [n] the field is considered to be an array.
1182 * Return: 0 if successful, error otherwise.
1184 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1185 struct module *mod, ...)
1187 struct dynevent_arg arg;
1191 cmd->event_name = name;
1192 cmd->private_data = mod;
1194 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1197 dynevent_arg_init(&arg, 0);
1199 ret = dynevent_arg_add(cmd, &arg, NULL);
1203 va_start(args, mod);
1205 const char *type, *name;
1207 type = va_arg(args, const char *);
1210 name = va_arg(args, const char *);
1214 if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1219 ret = synth_event_add_field(cmd, type, name);
1227 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
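/*
 * Usage sketch (illustrative): generating a complete event in one call via
 * the synth_event_gen_cmd_start() wrapper, passing type/name pairs as
 * variable args.  Event and field names are hypothetical.
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "schedtest", THIS_MODULE,
 *					"pid_t", "next_pid",
 *					"u64", "ts_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */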
1230 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1231 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1232 * @name: The name of the synthetic event
1233 * @mod: The module creating the event, NULL if not created from a module
1234 * @fields: An array of type/name field descriptions
1235 * @n_fields: The number of field descriptions contained in the fields array
1237 * Generate a synthetic event command to be executed by
1238 * synth_event_gen_cmd_end(). This function can be used to generate
1239 * the complete command or only the first part of it; in the latter
1240 * case, synth_event_add_field(), synth_event_add_field_str(), or
1241 * synth_event_add_fields() can be used to add more fields following
1244 * The event fields that will be defined for the event should be
1245 * passed in as an array of struct synth_field_desc, and the number of
1246 * elements in the array passed in as n_fields. Field ordering will
1247 * retain the ordering given in the fields array.
1249 * See synth_field_size() for available types. If field_name contains
1250 * [n] the field is considered to be an array.
1252 * Return: 0 if successful, error otherwise.
1254 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1256 struct synth_field_desc *fields,
1257 unsigned int n_fields)
1259 struct dynevent_arg arg;
1263 cmd->event_name = name;
1264 cmd->private_data = mod;
1266 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1269 if (n_fields > SYNTH_FIELDS_MAX)
1272 dynevent_arg_init(&arg, 0);
1274 ret = dynevent_arg_add(cmd, &arg, NULL);
1278 for (i = 0; i < n_fields; i++) {
1279 if (fields[i].type == NULL || fields[i].name == NULL)
1282 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1289 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
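/*
 * Usage sketch (illustrative): the array variant, where a synth_field_desc
 * array plays the role of the vararg pairs above.  Names are hypothetical,
 * and cmd is assumed to have been set up with synth_event_cmd_init() as in
 * the previous sketch.
 *
 *	static struct synth_field_desc fields[] = {
 *		{ .type = "pid_t",	.name = "next_pid" },
 *		{ .type = "char[16]",	.name = "next_comm" },
 *		{ .type = "u64",	.name = "ts_ns" },
 *	};
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "schedtest", THIS_MODULE,
 *					      fields, ARRAY_SIZE(fields));
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */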
1291 static int __create_synth_event(const char *name, const char *raw_fields)
1293 char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1294 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1295 int consumed, cmd_version = 1, n_fields_this_loop;
1296 int i, argc, n_fields = 0, ret = 0;
1297 struct synth_event *event = NULL;
1301 * - Add synthetic event: <event_name> field[;field] ...
1302 * - Remove synthetic event: !<event_name> field[;field] ...
1303 * where 'field' = type field_name
1306 if (name[0] == '\0') {
1307 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1311 if (!is_good_name(name)) {
1312 synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1316 mutex_lock(&event_mutex);
1318 event = find_synth_event(name);
1320 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1325 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1331 while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1332 argv = argv_split(GFP_KERNEL, field_str, &argc);
1343 n_fields_this_loop = 0;
1345 while (argc > consumed) {
1348 field = parse_synth_field(argc - consumed,
1349 argv + consumed, &consumed,
1351 if (IS_ERR(field)) {
1352 ret = PTR_ERR(field);
1357 * Track the highest version of any field we
1358 * found in the command.
1360 if (field_version > cmd_version)
1361 cmd_version = field_version;
1364 * Now sort out what is and isn't valid for
1365 * each supported version.
1367 * If we see more than 1 field per loop, it
1368 * means we have multiple fields between
1369 * semicolons, and that's something we no
1370 * longer support in a version 2 or greater
1373 if (cmd_version > 1 && n_fields_this_loop >= 1) {
1374 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1379 if (n_fields == SYNTH_FIELDS_MAX) {
1380 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1384 fields[n_fields++] = field;
1386 n_fields_this_loop++;
1390 if (consumed < argc) {
1391 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1398 if (n_fields == 0) {
1399 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1404 event = alloc_synth_event(name, n_fields, fields);
1405 if (IS_ERR(event)) {
1406 ret = PTR_ERR(event);
1410 ret = register_synth_event(event);
1412 dyn_event_add(&event->devent, &event->call);
1414 free_synth_event(event);
1416 mutex_unlock(&event_mutex);
1418 kfree(saved_fields);
1424 for (i = 0; i < n_fields; i++)
1425 free_synth_field(fields[i]);
1431 * synth_event_create - Create a new synthetic event
1432 * @name: The name of the new synthetic event
1433 * @fields: An array of type/name field descriptions
1434 * @n_fields: The number of field descriptions contained in the fields array
1435 * @mod: The module creating the event, NULL if not created from a module
1437 * Create a new synthetic event with the given name under the
1438 * trace/events/synthetic/ directory. The event fields that will be
1439 * defined for the event should be passed in as an array of struct
 * synth_field_desc, and the number of elements in the array passed in as
1441 * n_fields. Field ordering will retain the ordering given in the
1444 * If the new synthetic event is being created from a module, the mod
1445 * param must be non-NULL. This will ensure that the trace buffer
1446 * won't contain unreadable events.
 * The new synth event should be deleted using the synth_event_delete()
 * function.  The new synthetic event can be generated from modules or
 * other kernel code using synth_event_trace() and related functions.
1452 * Return: 0 if successful, error otherwise.
1454 int synth_event_create(const char *name, struct synth_field_desc *fields,
1455 unsigned int n_fields, struct module *mod)
1457 struct dynevent_cmd cmd;
1461 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1465 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1467 ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1472 ret = synth_event_gen_cmd_end(&cmd);
1478 EXPORT_SYMBOL_GPL(synth_event_create);
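/*
 * Usage sketch (illustrative): the one-shot interface, which builds and
 * runs the creation command internally.  The "wake_lat" event and its
 * fields are hypothetical and are reused by the tracing sketches below.
 *
 *	static struct synth_field_desc wake_lat_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *		{ .type = "char[16]",	.name = "comm" },
 *	};
 *
 *	ret = synth_event_create("wake_lat", wake_lat_fields,
 *				 ARRAY_SIZE(wake_lat_fields), THIS_MODULE);
 *
 * and on module exit the matching teardown would be:
 *
 *	ret = synth_event_delete("wake_lat");
 */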
1480 static int destroy_synth_event(struct synth_event *se)
1487 if (trace_event_dyn_busy(&se->call))
1490 ret = unregister_synth_event(se);
1492 dyn_event_remove(&se->devent);
1493 free_synth_event(se);
1500 * synth_event_delete - Delete a synthetic event
 * @event_name: The name of the synthetic event to delete
1503 * Delete a synthetic event that was created with synth_event_create().
1505 * Return: 0 if successful, error otherwise.
1507 int synth_event_delete(const char *event_name)
1509 struct synth_event *se = NULL;
1510 struct module *mod = NULL;
1513 mutex_lock(&event_mutex);
1514 se = find_synth_event(event_name);
1517 ret = destroy_synth_event(se);
1519 mutex_unlock(&event_mutex);
1523 * It is safest to reset the ring buffer if the module
1524 * being unloaded registered any events that were
1525 * used. The only worry is if a new module gets
1526 * loaded, and takes on the same id as the events of
1527 * this module. When printing out the buffer, traced
1528 * events left over from this module may be passed to
 * the new module events and unexpected results may occur.
 */
1532 tracing_reset_all_online_cpus();
1537 EXPORT_SYMBOL_GPL(synth_event_delete);
1539 static int check_command(const char *raw_command)
1541 char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1544 cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1548 name_and_field = strsep(&cmd, ";");
1549 if (!name_and_field) {
1554 if (name_and_field[0] == '!')
1557 argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1572 static int create_or_delete_synth_event(const char *raw_command)
1574 char *name = NULL, *fields, *p;
1577 raw_command = skip_spaces(raw_command);
1578 if (raw_command[0] == '\0')
1581 last_cmd_set(raw_command);
1583 ret = check_command(raw_command);
1585 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1589 p = strpbrk(raw_command, " \t");
1590 if (!p && raw_command[0] != '!') {
1591 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1596 name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1600 if (name[0] == '!') {
1601 ret = synth_event_delete(name + 1);
1605 fields = skip_spaces(p);
1607 ret = __create_synth_event(name, fields);
1614 static int synth_event_run_command(struct dynevent_cmd *cmd)
1616 struct synth_event *se;
1619 ret = create_or_delete_synth_event(cmd->seq.buffer);
1623 se = find_synth_event(cmd->event_name);
1627 se->mod = cmd->private_data;
1633 * synth_event_cmd_init - Initialize a synthetic event command object
1634 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1635 * @buf: A pointer to the buffer used to build the command
1636 * @maxlen: The length of the buffer passed in @buf
1638 * Initialize a synthetic event command object. Use this before
 * calling any of the other dynevent_cmd functions.
1641 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1643 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1644 synth_event_run_command);
1646 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
1649 __synth_event_trace_init(struct trace_event_file *file,
1650 struct synth_event_trace_state *trace_state)
1654 memset(trace_state, '\0', sizeof(*trace_state));
1657 * Normal event tracing doesn't get called at all unless the
1658 * ENABLED bit is set (which attaches the probe thus allowing
1659 * this code to be called, etc). Because this is called
1660 * directly by the user, we don't have that but we still need
1661 * to honor not logging when disabled. For the iterated
1662 * trace case, we save the enabled state upon start and just
1663 * ignore the following data calls.
1665 if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1666 trace_trigger_soft_disabled(file)) {
1667 trace_state->disabled = true;
1672 trace_state->event = file->event_call->data;
1678 __synth_event_trace_start(struct trace_event_file *file,
1679 struct synth_event_trace_state *trace_state,
1680 int dynamic_fields_size)
1682 int entry_size, fields_size = 0;
1685 fields_size = trace_state->event->n_u64 * sizeof(u64);
1686 fields_size += dynamic_fields_size;
1689 * Avoid ring buffer recursion detection, as this event
1690 * is being performed within another event.
1692 trace_state->buffer = file->tr->array_buffer.buffer;
1693 ring_buffer_nest_start(trace_state->buffer);
1695 entry_size = sizeof(*trace_state->entry) + fields_size;
1696 trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1699 if (!trace_state->entry) {
1700 ring_buffer_nest_end(trace_state->buffer);
1708 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1710 trace_event_buffer_commit(&trace_state->fbuffer);
1712 ring_buffer_nest_end(trace_state->buffer);
1716 * synth_event_trace - Trace a synthetic event
1717 * @file: The trace_event_file representing the synthetic event
1718 * @n_vals: The number of values in vals
1719 * @args: Variable number of args containing the event values
1721 * Trace a synthetic event using the values passed in the variable
 * The argument list should be a list of 'n_vals' u64 values.  The number
 * of vals must match the number of fields in the synthetic event, and
 * they must be in the same order as the synthetic event fields.
1728 * All vals should be cast to u64, and string vals are just pointers
1729 * to strings, cast to u64. Strings will be copied into space
1730 * reserved in the event for the string, using these pointers.
1732 * Return: 0 on success, err otherwise.
1734 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1736 unsigned int i, n_u64, len, data_size = 0;
1737 struct synth_event_trace_state state;
1741 ret = __synth_event_trace_init(file, &state);
1744 ret = 0; /* just disabled, not really an error */
1748 if (state.event->n_dynamic_fields) {
1749 va_start(args, n_vals);
1751 for (i = 0; i < state.event->n_fields; i++) {
1752 u64 val = va_arg(args, u64);
1754 if (state.event->fields[i]->is_string &&
1755 state.event->fields[i]->is_dynamic) {
1756 char *str_val = (char *)(long)val;
1758 data_size += strlen(str_val) + 1;
1765 ret = __synth_event_trace_start(file, &state, data_size);
1769 if (n_vals != state.event->n_fields) {
1776 va_start(args, n_vals);
1777 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1780 val = va_arg(args, u64);
1782 if (state.event->fields[i]->is_string) {
1783 char *str_val = (char *)(long)val;
1785 len = trace_string(state.entry, state.event, str_val,
1786 state.event->fields[i]->is_dynamic,
1788 data_size += len; /* only dynamic string increments */
1790 struct synth_field *field = state.event->fields[i];
1792 switch (field->size) {
1794 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1798 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1802 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1806 state.entry->fields[n_u64] = val;
1814 __synth_event_trace_end(&state);
1818 EXPORT_SYMBOL_GPL(synth_event_trace);
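/*
 * Usage sketch (illustrative): tracing the hypothetical "wake_lat" event
 * created above with the all-at-once variant.  Looking the event up with
 * trace_get_event_file()/trace_put_event_file() is an assumption about how
 * the caller obtains the file; the event must also be enabled (for
 * instance via trace_array_set_clr_event()) before anything is recorded.
 *
 *	struct trace_event_file *file;
 *	int ret;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "wake_lat");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 * Values are passed as u64 in field order; string values are pointers
 * cast to u64:
 *
 *	ret = synth_event_trace(file, 3,
 *				(u64)current->pid,
 *				(u64)2000000,
 *				(u64)"cat");
 *
 *	trace_put_event_file(file);
 */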
1821 * synth_event_trace_array - Trace a synthetic event from an array
1822 * @file: The trace_event_file representing the synthetic event
1823 * @vals: Array of values
1824 * @n_vals: The number of values in vals
1826 * Trace a synthetic event using the values passed in as 'vals'.
 * The 'vals' array is just an array of 'n_vals' u64 values.  The number of
 * vals must match the number of fields in the synthetic event, and
 * they must be in the same order as the synthetic event fields.
1832 * All vals should be cast to u64, and string vals are just pointers
1833 * to strings, cast to u64. Strings will be copied into space
1834 * reserved in the event for the string, using these pointers.
1836 * Return: 0 on success, err otherwise.
1838 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1839 unsigned int n_vals)
1841 unsigned int i, n_u64, field_pos, len, data_size = 0;
1842 struct synth_event_trace_state state;
1846 ret = __synth_event_trace_init(file, &state);
1849 ret = 0; /* just disabled, not really an error */
1853 if (state.event->n_dynamic_fields) {
1854 for (i = 0; i < state.event->n_dynamic_fields; i++) {
1855 field_pos = state.event->dynamic_fields[i]->field_pos;
1856 str_val = (char *)(long)vals[field_pos];
1857 len = strlen(str_val) + 1;
1862 ret = __synth_event_trace_start(file, &state, data_size);
1866 if (n_vals != state.event->n_fields) {
1873 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1874 if (state.event->fields[i]->is_string) {
1875 char *str_val = (char *)(long)vals[i];
1877 len = trace_string(state.entry, state.event, str_val,
1878 state.event->fields[i]->is_dynamic,
1880 data_size += len; /* only dynamic string increments */
1882 struct synth_field *field = state.event->fields[i];
1885 switch (field->size) {
1887 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1891 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1895 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1899 state.entry->fields[n_u64] = val;
1906 __synth_event_trace_end(&state);
1910 EXPORT_SYMBOL_GPL(synth_event_trace_array);
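/*
 * Usage sketch (illustrative): the same hypothetical event, but with the
 * values packed into a u64 array instead of a variable argument list.
 * Field order follows the event definition (pid, lat_ns, comm):
 *
 *	u64 vals[3];
 *
 *	vals[0] = (u64)current->pid;
 *	vals[1] = 2000000;
 *	vals[2] = (u64)"cat";
 *
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */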
1913 * synth_event_trace_start - Start piecewise synthetic event trace
1914 * @file: The trace_event_file representing the synthetic event
1915 * @trace_state: A pointer to object tracking the piecewise trace state
1917 * Start the trace of a synthetic event field-by-field rather than all
1920 * This function 'opens' an event trace, which means space is reserved
1921 * for the event in the trace buffer, after which the event's
1922 * individual field values can be set through either
1923 * synth_event_add_next_val() or synth_event_add_val().
1925 * A pointer to a trace_state object is passed in, which will keep
1926 * track of the current event trace state until the event trace is
1927 * closed (and the event finally traced) using
1928 * synth_event_trace_end().
1930 * Note that synth_event_trace_end() must be called after all values
1931 * have been added for each event trace, regardless of whether adding
1932 * all field values succeeded or not.
1934 * Note also that for a given event trace, all fields must be added
1935 * using either synth_event_add_next_val() or synth_event_add_val()
1936 * but not both together or interleaved.
1938 * Return: 0 on success, err otherwise.
1940 int synth_event_trace_start(struct trace_event_file *file,
1941 struct synth_event_trace_state *trace_state)
1948 ret = __synth_event_trace_init(file, trace_state);
1951 ret = 0; /* just disabled, not really an error */
1955 if (trace_state->event->n_dynamic_fields)
1958 ret = __synth_event_trace_start(file, trace_state, 0);
1962 EXPORT_SYMBOL_GPL(synth_event_trace_start);
1964 static int __synth_event_add_val(const char *field_name, u64 val,
1965 struct synth_event_trace_state *trace_state)
1967 struct synth_field *field = NULL;
1968 struct synth_trace_event *entry;
1969 struct synth_event *event;
/* can't mix synth_event_add_next_val() with synth_event_add_val() */
1979 if (trace_state->add_next) {
1983 trace_state->add_name = true;
1985 if (trace_state->add_name) {
1989 trace_state->add_next = true;
1992 if (trace_state->disabled)
1995 event = trace_state->event;
1996 if (trace_state->add_name) {
1997 for (i = 0; i < event->n_fields; i++) {
1998 field = event->fields[i];
1999 if (strcmp(field->name, field_name) == 0)
2007 if (trace_state->cur_field >= event->n_fields) {
2011 field = event->fields[trace_state->cur_field++];
2014 entry = trace_state->entry;
2015 if (field->is_string) {
2016 char *str_val = (char *)(long)val;
2019 if (field->is_dynamic) { /* add_val can't do dynamic strings */
2029 str_field = (char *)&entry->fields[field->offset];
2030 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
2032 switch (field->size) {
2034 *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
2038 *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
2042 *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
2046 trace_state->entry->fields[field->offset] = val;
2055 * synth_event_add_next_val - Add the next field's value to an open synth trace
2056 * @val: The value to set the next field to
2057 * @trace_state: A pointer to object tracking the piecewise trace state
2059 * Set the value of the next field in an event that's been opened by
2060 * synth_event_trace_start().
2062 * The val param should be the value cast to u64. If the value points
2063 * to a string, the val param should be a char * cast to u64.
2065 * This function assumes all the fields in an event are to be set one
2066 * after another - successive calls to this function are made, one for
2067 * each field, in the order of the fields in the event, until all
2068 * fields have been set. If you'd rather set each field individually
2069 * without regard to ordering, synth_event_add_val() can be used
2072 * Note however that synth_event_add_next_val() and
2073 * synth_event_add_val() can't be intermixed for a given event trace -
2074 * one or the other but not both can be used at the same time.
2076 * Note also that synth_event_trace_end() must be called after all
2077 * values have been added for each event trace, regardless of whether
2078 * adding all field values succeeded or not.
2080 * Return: 0 on success, err otherwise.
2082 int synth_event_add_next_val(u64 val,
2083 struct synth_event_trace_state *trace_state)
2085 return __synth_event_add_val(NULL, val, trace_state);
2087 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
2090 * synth_event_add_val - Add a named field's value to an open synth trace
2091 * @field_name: The name of the synthetic event field value to set
2092 * @val: The value to set the named field to
2093 * @trace_state: A pointer to object tracking the piecewise trace state
2095 * Set the value of the named field in an event that's been opened by
2096 * synth_event_trace_start().
2098 * The val param should be the value cast to u64. If the value points
2099 * to a string, the val param should be a char * cast to u64.
2101 * This function looks up the field name, and if found, sets the field
2102 * to the specified value. This lookup makes this function more
2103 * expensive than synth_event_add_next_val(), so use that or the
 * non-piecewise synth_event_trace() instead if efficiency is more
2107 * Note however that synth_event_add_next_val() and
2108 * synth_event_add_val() can't be intermixed for a given event trace -
2109 * one or the other but not both can be used at the same time.
2111 * Note also that synth_event_trace_end() must be called after all
2112 * values have been added for each event trace, regardless of whether
2113 * adding all field values succeeded or not.
2115 * Return: 0 on success, err otherwise.
2117 int synth_event_add_val(const char *field_name, u64 val,
2118 struct synth_event_trace_state *trace_state)
2120 return __synth_event_add_val(field_name, val, trace_state);
2122 EXPORT_SYMBOL_GPL(synth_event_add_val);
2125 * synth_event_trace_end - End piecewise synthetic event trace
2126 * @trace_state: A pointer to object tracking the piecewise trace state
2128 * End the trace of a synthetic event opened by
 * synth_event_trace_start().
2131 * This function 'closes' an event trace, which basically means that
2132 * it commits the reserved event and cleans up other loose ends.
2134 * A pointer to a trace_state object is passed in, which will keep
2135 * track of the current event trace state opened with
2136 * synth_event_trace_start().
2138 * Note that this function must be called after all values have been
2139 * added for each event trace, regardless of whether adding all field
2140 * values succeeded or not.
2142 * Return: 0 on success, err otherwise.
2144 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2149 __synth_event_trace_end(trace_state);
2153 EXPORT_SYMBOL_GPL(synth_event_trace_end);
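/*
 * Usage sketch (illustrative): the piecewise variant against the same
 * hypothetical event, with file obtained as in the synth_event_trace()
 * sketch above.  Fields are filled in order with
 * synth_event_add_next_val(); synth_event_add_val() could be used instead
 * to set them by name, but the two must not be mixed within one trace.
 *
 *	struct synth_event_trace_state state;
 *	int ret;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val((u64)current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(2000000, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val((u64)"cat", &state);
 *
 *	synth_event_trace_end(&state);	<- required even if an add failed
 */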
2155 static int create_synth_event(const char *raw_command)
2161 raw_command = skip_spaces(raw_command);
2162 if (raw_command[0] == '\0')
2165 last_cmd_set(raw_command);
2169 /* Don't try to process if not our system */
2170 if (name[0] != 's' || name[1] != ':')
2174 p = strpbrk(raw_command, " \t");
2176 synth_err(SYNTH_ERR_INVALID_CMD, 0);
2180 fields = skip_spaces(p);
/* This interface accepts a group name prefix */
2183 if (strchr(name, '/')) {
2184 len = str_has_prefix(name, SYNTH_SYSTEM "/");
2186 synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2192 len = name - raw_command;
2194 ret = check_command(raw_command + len);
2196 synth_err(SYNTH_ERR_INVALID_CMD, 0);
2200 name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2204 ret = __create_synth_event(name, fields);
2211 static int synth_event_release(struct dyn_event *ev)
2213 struct synth_event *event = to_synth_event(ev);
2219 if (trace_event_dyn_busy(&event->call))
2222 ret = unregister_synth_event(event);
2226 dyn_event_remove(ev);
2227 free_synth_event(event);
2231 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2233 struct synth_field *field;
2237 seq_printf(m, "%s\t", event->name);
2239 for (i = 0; i < event->n_fields; i++) {
2240 field = event->fields[i];
2243 t = strstr(type, "__data_loc");
2244 if (t) { /* __data_loc belongs in format but not event desc */
2245 t += sizeof("__data_loc");
2249 /* parameter values */
2250 seq_printf(m, "%s %s%s", type, field->name,
2251 i == event->n_fields - 1 ? "" : "; ");
2259 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2261 struct synth_event *event = to_synth_event(ev);
2263 seq_printf(m, "s:%s/", event->class.system);
2265 return __synth_event_show(m, event);
2268 static int synth_events_seq_show(struct seq_file *m, void *v)
2270 struct dyn_event *ev = v;
2272 if (!is_synth_event(ev))
2275 return __synth_event_show(m, to_synth_event(ev));
2278 static const struct seq_operations synth_events_seq_op = {
2279 .start = dyn_event_seq_start,
2280 .next = dyn_event_seq_next,
2281 .stop = dyn_event_seq_stop,
2282 .show = synth_events_seq_show,
2285 static int synth_events_open(struct inode *inode, struct file *file)
2289 ret = security_locked_down(LOCKDOWN_TRACEFS);
2293 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2294 ret = dyn_events_release_all(&synth_event_ops);
2299 return seq_open(file, &synth_events_seq_op);
2302 static ssize_t synth_events_write(struct file *file,
2303 const char __user *buffer,
2304 size_t count, loff_t *ppos)
2306 return trace_parse_run_command(file, buffer, count, ppos,
2307 create_or_delete_synth_event);
2310 static const struct file_operations synth_events_fops = {
2311 .open = synth_events_open,
2312 .write = synth_events_write,
2314 .llseek = seq_lseek,
2315 .release = seq_release,
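/*
 * Usage sketch: the same events can be created and removed from user space
 * through this tracefs file, using the command form parsed by
 * __create_synth_event() above, e.g.:
 *
 *	# echo 'wake_lat pid_t pid; u64 lat_ns; char comm[16]' >> \
 *		/sys/kernel/tracing/synthetic_events
 *	# echo '!wake_lat' >> /sys/kernel/tracing/synthetic_events
 *
 * Prefixing the event name with '!' removes a previously created event.
 */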
 * Register dynevent at core_initcall. This allows the kernel to set up kprobe
2320 * events in postcore_initcall without tracefs.
2322 static __init int trace_events_synth_init_early(void)
2326 err = dyn_event_register(&synth_event_ops);
2328 pr_warn("Could not register synth_event_ops\n");
2332 core_initcall(trace_events_synth_init_early);
2334 static __init int trace_events_synth_init(void)
2336 struct dentry *entry = NULL;
2338 err = tracing_init_dentry();
2342 entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2343 NULL, NULL, &synth_events_fops);
2351 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2356 fs_initcall(trace_events_synth_init);