1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * trace_events_synth - synthetic trace events
4  *
5  * Copyright (C) 2015, 2020 Tom Zanussi <tom.zanussi@linux.intel.com>
6  */
8 #include <linux/module.h>
9 #include <linux/kallsyms.h>
10 #include <linux/security.h>
11 #include <linux/mutex.h>
12 #include <linux/slab.h>
13 #include <linux/stacktrace.h>
14 #include <linux/rculist.h>
15 #include <linux/tracefs.h>
17 /* for gfp flag names */
18 #include <linux/trace_events.h>
19 #include <trace/events/mmflags.h>
21 #include "trace_synth.h"
25 C(BAD_NAME, "Illegal name"), \
26 C(INVALID_CMD, "Command must be of the form: <name> field[;field] ..."),\
27 C(INVALID_DYN_CMD, "Command must be of the form: s or -:[synthetic/]<name> field[;field] ..."),\
28 C(EVENT_EXISTS, "Event already exists"), \
29 C(TOO_MANY_FIELDS, "Too many fields"), \
30 C(INCOMPLETE_TYPE, "Incomplete type"), \
31 C(INVALID_TYPE, "Invalid type"), \
32 C(INVALID_FIELD, "Invalid field"), \
33 C(INVALID_ARRAY_SPEC, "Invalid array specification"),
36 #define C(a, b) SYNTH_ERR_##a
43 static const char *err_text[] = { ERRORS };
45 static char *last_cmd;
47 static int errpos(const char *str)
49 if (!str || !last_cmd)
52 return err_pos(last_cmd, str);
55 static void last_cmd_set(const char *str)
62 last_cmd = kstrdup(str, GFP_KERNEL);
65 static void synth_err(u8 err_type, u16 err_pos)
70 tracing_log_err(NULL, "synthetic_events", last_cmd, err_text,
74 static int create_synth_event(const char *raw_command);
75 static int synth_event_show(struct seq_file *m, struct dyn_event *ev);
76 static int synth_event_release(struct dyn_event *ev);
77 static bool synth_event_is_busy(struct dyn_event *ev);
78 static bool synth_event_match(const char *system, const char *event,
79 int argc, const char **argv, struct dyn_event *ev);
81 static struct dyn_event_operations synth_event_ops = {
82 .create = create_synth_event,
83 .show = synth_event_show,
84 .is_busy = synth_event_is_busy,
85 .free = synth_event_release,
86 .match = synth_event_match,
89 static bool is_synth_event(struct dyn_event *ev)
91 return ev->ops == &synth_event_ops;
94 static struct synth_event *to_synth_event(struct dyn_event *ev)
96 return container_of(ev, struct synth_event, devent);
99 static bool synth_event_is_busy(struct dyn_event *ev)
101 struct synth_event *event = to_synth_event(ev);
103 return event->ref != 0;
106 static bool synth_event_match(const char *system, const char *event,
107 int argc, const char **argv, struct dyn_event *ev)
109 struct synth_event *sev = to_synth_event(ev);
111 return strcmp(sev->name, event) == 0 &&
112 (!system || strcmp(system, SYNTH_SYSTEM) == 0);
115 struct synth_trace_event {
116 struct trace_entry ent;
120 static int synth_event_define_fields(struct trace_event_call *call)
122 struct synth_trace_event trace;
123 int offset = offsetof(typeof(trace), fields);
124 struct synth_event *event = call->data;
125 unsigned int i, size, n_u64;
130 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
131 size = event->fields[i]->size;
132 is_signed = event->fields[i]->is_signed;
133 type = event->fields[i]->type;
134 name = event->fields[i]->name;
135 ret = trace_define_field(call, type, name, offset, size,
136 is_signed, FILTER_OTHER);
140 event->fields[i]->offset = n_u64;
142 if (event->fields[i]->is_string && !event->fields[i]->is_dynamic) {
143 offset += STR_VAR_LEN_MAX;
144 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
146 offset += sizeof(u64);
151 event->n_u64 = n_u64;
156 static bool synth_field_signed(char *type)
158 if (str_has_prefix(type, "u"))
160 if (strcmp(type, "gfp_t") == 0)
166 static int synth_field_is_string(char *type)
168 if (strstr(type, "char[") != NULL)
174 static int synth_field_string_size(char *type)
176 char buf[4], *end, *start;
180 start = strstr(type, "char[");
183 start += sizeof("char[") - 1;
185 end = strchr(type, ']');
186 if (!end || end < start || type + strlen(type) > end + 1)
194 return 0; /* variable-length string */
196 strncpy(buf, start, len);
199 err = kstrtouint(buf, 0, &size);
203 if (size > STR_VAR_LEN_MAX)
209 static int synth_field_size(char *type)
213 if (strcmp(type, "s64") == 0)
215 else if (strcmp(type, "u64") == 0)
217 else if (strcmp(type, "s32") == 0)
219 else if (strcmp(type, "u32") == 0)
221 else if (strcmp(type, "s16") == 0)
223 else if (strcmp(type, "u16") == 0)
225 else if (strcmp(type, "s8") == 0)
227 else if (strcmp(type, "u8") == 0)
229 else if (strcmp(type, "char") == 0)
231 else if (strcmp(type, "unsigned char") == 0)
232 size = sizeof(unsigned char);
233 else if (strcmp(type, "int") == 0)
235 else if (strcmp(type, "unsigned int") == 0)
236 size = sizeof(unsigned int);
237 else if (strcmp(type, "long") == 0)
239 else if (strcmp(type, "unsigned long") == 0)
240 size = sizeof(unsigned long);
241 else if (strcmp(type, "bool") == 0)
243 else if (strcmp(type, "pid_t") == 0)
244 size = sizeof(pid_t);
245 else if (strcmp(type, "gfp_t") == 0)
246 size = sizeof(gfp_t);
247 else if (synth_field_is_string(type))
248 size = synth_field_string_size(type);
253 static const char *synth_field_fmt(char *type)
255 const char *fmt = "%llu";
257 if (strcmp(type, "s64") == 0)
259 else if (strcmp(type, "u64") == 0)
261 else if (strcmp(type, "s32") == 0)
263 else if (strcmp(type, "u32") == 0)
265 else if (strcmp(type, "s16") == 0)
267 else if (strcmp(type, "u16") == 0)
269 else if (strcmp(type, "s8") == 0)
271 else if (strcmp(type, "u8") == 0)
273 else if (strcmp(type, "char") == 0)
275 else if (strcmp(type, "unsigned char") == 0)
277 else if (strcmp(type, "int") == 0)
279 else if (strcmp(type, "unsigned int") == 0)
281 else if (strcmp(type, "long") == 0)
283 else if (strcmp(type, "unsigned long") == 0)
285 else if (strcmp(type, "bool") == 0)
287 else if (strcmp(type, "pid_t") == 0)
289 else if (strcmp(type, "gfp_t") == 0)
291 else if (synth_field_is_string(type))
297 static void print_synth_event_num_val(struct trace_seq *s,
298 char *print_fmt, char *name,
299 int size, u64 val, char *space)
303 trace_seq_printf(s, print_fmt, name, (u8)val, space);
307 trace_seq_printf(s, print_fmt, name, (u16)val, space);
311 trace_seq_printf(s, print_fmt, name, (u32)val, space);
315 trace_seq_printf(s, print_fmt, name, val, space);
320 static enum print_line_t print_synth_event(struct trace_iterator *iter,
322 struct trace_event *event)
324 struct trace_array *tr = iter->tr;
325 struct trace_seq *s = &iter->seq;
326 struct synth_trace_event *entry;
327 struct synth_event *se;
328 unsigned int i, n_u64;
332 entry = (struct synth_trace_event *)iter->ent;
333 se = container_of(event, struct synth_event, call.event);
335 trace_seq_printf(s, "%s: ", se->name);
337 for (i = 0, n_u64 = 0; i < se->n_fields; i++) {
338 if (trace_seq_has_overflowed(s))
341 fmt = synth_field_fmt(se->fields[i]->type);
343 /* parameter types */
344 if (tr && tr->trace_flags & TRACE_ITER_VERBOSE)
345 trace_seq_printf(s, "%s ", fmt);
347 snprintf(print_fmt, sizeof(print_fmt), "%%s=%s%%s", fmt);
349 /* parameter values */
350 if (se->fields[i]->is_string) {
351 if (se->fields[i]->is_dynamic) {
352 u32 offset, data_offset;
355 offset = (u32)entry->fields[n_u64];
356 data_offset = offset & 0xffff;
358 str_field = (char *)entry + data_offset;
360 trace_seq_printf(s, print_fmt, se->fields[i]->name,
363 i == se->n_fields - 1 ? "" : " ");
366 trace_seq_printf(s, print_fmt, se->fields[i]->name,
368 (char *)&entry->fields[n_u64],
369 i == se->n_fields - 1 ? "" : " ");
370 n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
373 struct trace_print_flags __flags[] = {
374 __def_gfpflag_names, {-1, NULL} };
375 char *space = (i == se->n_fields - 1 ? "" : " ");
377 print_synth_event_num_val(s, print_fmt,
380 entry->fields[n_u64],
383 if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
384 trace_seq_puts(s, " (");
385 trace_print_flags_seq(s, "|",
386 entry->fields[n_u64],
388 trace_seq_putc(s, ')');
394 trace_seq_putc(s, '\n');
396 return trace_handle_return(s);
399 static struct trace_event_functions synth_event_funcs = {
400 .trace = print_synth_event
403 static unsigned int trace_string(struct synth_trace_event *entry,
404 struct synth_event *event,
407 unsigned int data_size,
410 unsigned int len = 0;
416 data_offset = offsetof(typeof(*entry), fields);
417 data_offset += event->n_u64 * sizeof(u64);
418 data_offset += data_size;
420 str_field = (char *)entry + data_offset;
422 len = strlen(str_val) + 1;
423 strscpy(str_field, str_val, len);
425 data_offset |= len << 16;
426 *(u32 *)&entry->fields[*n_u64] = data_offset;
430 str_field = (char *)&entry->fields[*n_u64];
432 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
433 (*n_u64) += STR_VAR_LEN_MAX / sizeof(u64);
439 static notrace void trace_event_raw_event_synth(void *__data,
441 unsigned int *var_ref_idx)
443 unsigned int i, n_u64, val_idx, len, data_size = 0;
444 struct trace_event_file *trace_file = __data;
445 struct synth_trace_event *entry;
446 struct trace_event_buffer fbuffer;
447 struct trace_buffer *buffer;
448 struct synth_event *event;
451 event = trace_file->event_call->data;
453 if (trace_trigger_soft_disabled(trace_file))
456 fields_size = event->n_u64 * sizeof(u64);
458 for (i = 0; i < event->n_dynamic_fields; i++) {
459 unsigned int field_pos = event->dynamic_fields[i]->field_pos;
462 val_idx = var_ref_idx[field_pos];
463 str_val = (char *)(long)var_ref_vals[val_idx];
465 len = strlen(str_val) + 1;
471 * Avoid ring buffer recursion detection, as this event
472 * is being performed within another event.
474 buffer = trace_file->tr->array_buffer.buffer;
475 ring_buffer_nest_start(buffer);
477 entry = trace_event_buffer_reserve(&fbuffer, trace_file,
478 sizeof(*entry) + fields_size);
482 for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
483 val_idx = var_ref_idx[i];
484 if (event->fields[i]->is_string) {
485 char *str_val = (char *)(long)var_ref_vals[val_idx];
487 len = trace_string(entry, event, str_val,
488 event->fields[i]->is_dynamic,
490 data_size += len; /* only dynamic string increments */
492 struct synth_field *field = event->fields[i];
493 u64 val = var_ref_vals[val_idx];
495 switch (field->size) {
497 *(u8 *)&entry->fields[n_u64] = (u8)val;
501 *(u16 *)&entry->fields[n_u64] = (u16)val;
505 *(u32 *)&entry->fields[n_u64] = (u32)val;
509 entry->fields[n_u64] = val;
516 trace_event_buffer_commit(&fbuffer);
518 ring_buffer_nest_end(buffer);
521 static void free_synth_event_print_fmt(struct trace_event_call *call)
524 kfree(call->print_fmt);
525 call->print_fmt = NULL;
529 static int __set_synth_event_print_fmt(struct synth_event *event,
536 /* When len=0, we just calculate the needed length */
537 #define LEN_OR_ZERO (len ? len - pos : 0)
539 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
540 for (i = 0; i < event->n_fields; i++) {
541 fmt = synth_field_fmt(event->fields[i]->type);
542 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s%s",
543 event->fields[i]->name, fmt,
544 i == event->n_fields - 1 ? "" : ", ");
546 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
548 for (i = 0; i < event->n_fields; i++) {
549 if (event->fields[i]->is_string &&
550 event->fields[i]->is_dynamic)
551 pos += snprintf(buf + pos, LEN_OR_ZERO,
552 ", __get_str(%s)", event->fields[i]->name);
554 pos += snprintf(buf + pos, LEN_OR_ZERO,
555 ", REC->%s", event->fields[i]->name);
560 /* return the length of print_fmt */
564 static int set_synth_event_print_fmt(struct trace_event_call *call)
566 struct synth_event *event = call->data;
570 /* First: called with 0 length to calculate the needed length */
571 len = __set_synth_event_print_fmt(event, NULL, 0);
573 print_fmt = kmalloc(len + 1, GFP_KERNEL);
577 /* Second: actually write the @print_fmt */
578 __set_synth_event_print_fmt(event, print_fmt, len + 1);
579 call->print_fmt = print_fmt;
584 static void free_synth_field(struct synth_field *field)
591 static int check_field_version(const char *prefix, const char *field_type,
592 const char *field_name)
595 * For backward compatibility, the old synthetic event command
596 * format did not require semicolons, and in order to not
597 * break user space, that old format must still work. If a new
598 * feature is added, then the format that uses the new feature
599 * will be required to have semicolons, as nothing that uses
600 * the old format would be using the new, yet to be created,
601 * feature. When a new feature is added, this will detect it,
602 * and return a number greater than 1, and require the format to have semicolons.
608 static struct synth_field *parse_synth_field(int argc, char **argv,
609 int *consumed, int *field_version)
611 const char *prefix = NULL, *field_type = argv[0], *field_name, *array;
612 struct synth_field *field;
613 int len, ret = -ENOMEM;
617 if (!strcmp(field_type, "unsigned")) {
619 synth_err(SYNTH_ERR_INCOMPLETE_TYPE, errpos(field_type));
620 return ERR_PTR(-EINVAL);
622 prefix = "unsigned ";
623 field_type = argv[1];
624 field_name = argv[2];
627 field_name = argv[1];
632 synth_err(SYNTH_ERR_INVALID_FIELD, errpos(field_type));
633 return ERR_PTR(-EINVAL);
636 *field_version = check_field_version(prefix, field_type, field_name);
638 field = kzalloc(sizeof(*field), GFP_KERNEL);
640 return ERR_PTR(-ENOMEM);
642 len = strlen(field_name);
643 array = strchr(field_name, '[');
645 len -= strlen(array);
647 field->name = kmemdup_nul(field_name, len, GFP_KERNEL);
651 if (!is_good_name(field->name)) {
652 synth_err(SYNTH_ERR_BAD_NAME, errpos(field_name));
657 len = strlen(field_type) + 1;
660 len += strlen(array);
663 len += strlen(prefix);
665 field->type = kzalloc(len, GFP_KERNEL);
669 seq_buf_init(&s, field->type, len);
671 seq_buf_puts(&s, prefix);
672 seq_buf_puts(&s, field_type);
674 seq_buf_puts(&s, array);
675 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
678 s.buffer[s.len] = '\0';
680 size = synth_field_size(field->type);
683 synth_err(SYNTH_ERR_INVALID_ARRAY_SPEC, errpos(field_name));
685 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
688 } else if (size == 0) {
689 if (synth_field_is_string(field->type)) {
692 len = sizeof("__data_loc ") + strlen(field->type) + 1;
693 type = kzalloc(len, GFP_KERNEL);
697 seq_buf_init(&s, type, len);
698 seq_buf_puts(&s, "__data_loc ");
699 seq_buf_puts(&s, field->type);
701 if (WARN_ON_ONCE(!seq_buf_buffer_left(&s)))
703 s.buffer[s.len] = '\0';
708 field->is_dynamic = true;
711 synth_err(SYNTH_ERR_INVALID_TYPE, errpos(field_type));
718 if (synth_field_is_string(field->type))
719 field->is_string = true;
721 field->is_signed = synth_field_signed(field->type);
725 free_synth_field(field);
726 field = ERR_PTR(ret);
730 static void free_synth_tracepoint(struct tracepoint *tp)
739 static struct tracepoint *alloc_synth_tracepoint(char *name)
741 struct tracepoint *tp;
743 tp = kzalloc(sizeof(*tp), GFP_KERNEL);
745 return ERR_PTR(-ENOMEM);
747 tp->name = kstrdup(name, GFP_KERNEL);
750 return ERR_PTR(-ENOMEM);
756 struct synth_event *find_synth_event(const char *name)
758 struct dyn_event *pos;
759 struct synth_event *event;
761 for_each_dyn_event(pos) {
762 if (!is_synth_event(pos))
764 event = to_synth_event(pos);
765 if (strcmp(event->name, name) == 0)
772 static struct trace_event_fields synth_event_fields_array[] = {
773 { .type = TRACE_FUNCTION_TYPE,
774 .define_fields = synth_event_define_fields },
778 static int register_synth_event(struct synth_event *event)
780 struct trace_event_call *call = &event->call;
783 event->call.class = &event->class;
784 event->class.system = kstrdup(SYNTH_SYSTEM, GFP_KERNEL);
785 if (!event->class.system) {
790 event->tp = alloc_synth_tracepoint(event->name);
791 if (IS_ERR(event->tp)) {
792 ret = PTR_ERR(event->tp);
797 INIT_LIST_HEAD(&call->class->fields);
798 call->event.funcs = &synth_event_funcs;
799 call->class->fields_array = synth_event_fields_array;
801 ret = register_trace_event(&call->event);
806 call->flags = TRACE_EVENT_FL_TRACEPOINT;
807 call->class->reg = trace_event_reg;
808 call->class->probe = trace_event_raw_event_synth;
810 call->tp = event->tp;
812 ret = trace_add_event_call(call);
814 pr_warn("Failed to register synthetic event: %s\n",
815 trace_event_name(call));
819 ret = set_synth_event_print_fmt(call);
821 trace_remove_event_call(call);
827 unregister_trace_event(&call->event);
831 static int unregister_synth_event(struct synth_event *event)
833 struct trace_event_call *call = &event->call;
836 ret = trace_remove_event_call(call);
841 static void free_synth_event(struct synth_event *event)
848 for (i = 0; i < event->n_fields; i++)
849 free_synth_field(event->fields[i]);
851 kfree(event->fields);
852 kfree(event->dynamic_fields);
854 kfree(event->class.system);
855 free_synth_tracepoint(event->tp);
856 free_synth_event_print_fmt(&event->call);
860 static struct synth_event *alloc_synth_event(const char *name, int n_fields,
861 struct synth_field **fields)
863 unsigned int i, j, n_dynamic_fields = 0;
864 struct synth_event *event;
866 event = kzalloc(sizeof(*event), GFP_KERNEL);
868 event = ERR_PTR(-ENOMEM);
872 event->name = kstrdup(name, GFP_KERNEL);
875 event = ERR_PTR(-ENOMEM);
879 event->fields = kcalloc(n_fields, sizeof(*event->fields), GFP_KERNEL);
880 if (!event->fields) {
881 free_synth_event(event);
882 event = ERR_PTR(-ENOMEM);
886 for (i = 0; i < n_fields; i++)
887 if (fields[i]->is_dynamic)
890 if (n_dynamic_fields) {
891 event->dynamic_fields = kcalloc(n_dynamic_fields,
892 sizeof(*event->dynamic_fields),
894 if (!event->dynamic_fields) {
895 free_synth_event(event);
896 event = ERR_PTR(-ENOMEM);
901 dyn_event_init(&event->devent, &synth_event_ops);
903 for (i = 0, j = 0; i < n_fields; i++) {
904 fields[i]->field_pos = i;
905 event->fields[i] = fields[i];
907 if (fields[i]->is_dynamic)
908 event->dynamic_fields[j++] = fields[i];
910 event->n_dynamic_fields = j;
911 event->n_fields = n_fields;
916 static int synth_event_check_arg_fn(void *data)
918 struct dynevent_arg_pair *arg_pair = data;
921 size = synth_field_size((char *)arg_pair->lhs);
923 if (strstr((char *)arg_pair->lhs, "["))
927 return size ? 0 : -EINVAL;
931 * synth_event_add_field - Add a new field to a synthetic event cmd
932 * @cmd: A pointer to the dynevent_cmd struct representing the new event
933 * @type: The type of the new field to add
934 * @name: The name of the new field to add
936 * Add a new field to a synthetic event cmd object. Field ordering is in
937 * the same order the fields are added.
939 * See synth_field_size() for available types. If field_name contains
940 * [n] the field is considered to be an array.
942 * Return: 0 if successful, error otherwise.
944 int synth_event_add_field(struct dynevent_cmd *cmd, const char *type,
947 struct dynevent_arg_pair arg_pair;
950 if (cmd->type != DYNEVENT_TYPE_SYNTH)
956 dynevent_arg_pair_init(&arg_pair, 0, ';');
961 ret = dynevent_arg_pair_add(cmd, &arg_pair, synth_event_check_arg_fn);
965 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
970 EXPORT_SYMBOL_GPL(synth_event_add_field);
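/*
 * A minimal sketch of adding fields one at a time; "myevent" and the
 * field names are hypothetical, and error handling is abbreviated:
 *
 *	struct dynevent_cmd cmd;
 *	char *buf;
 *	int ret;
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "myevent", THIS_MODULE);
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "pid_t", "pid");
 *	if (!ret)
 *		ret = synth_event_add_field(&cmd, "u64", "lat_ns");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 *	kfree(buf);
 */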
973 * synth_event_add_field_str - Add a new field to a synthetic event cmd
974 * @cmd: A pointer to the dynevent_cmd struct representing the new event
975 * @type_name: The type and name of the new field to add, as a single string
977 * Add a new field to a synthetic event cmd object, as a single
978 * string. The @type_name string is expected to be of the form 'type
979 * name', to which a ';' will be appended. No sanity checking is done -
980 * what's passed in is assumed to already be well-formed. Field
981 * ordering is in the same order the fields are added.
983 * See synth_field_size() for available types. If field_name contains
984 * [n] the field is considered to be an array.
986 * Return: 0 if successful, error otherwise.
988 int synth_event_add_field_str(struct dynevent_cmd *cmd, const char *type_name)
990 struct dynevent_arg arg;
993 if (cmd->type != DYNEVENT_TYPE_SYNTH)
999 dynevent_arg_init(&arg, ';');
1001 arg.str = type_name;
1003 ret = dynevent_arg_add(cmd, &arg, NULL);
1007 if (++cmd->n_fields > SYNTH_FIELDS_MAX)
1012 EXPORT_SYMBOL_GPL(synth_event_add_field_str);
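/*
 * As in the sketch above, but supplying the type and name as a single
 * string; the trailing ';' is appended automatically (the field is
 * hypothetical):
 *
 *	ret = synth_event_add_field_str(&cmd, "char[16] comm");
 */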
1015 * synth_event_add_fields - Add multiple fields to a synthetic event cmd
1016 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1017 * @fields: An array of type/name field descriptions
1018 * @n_fields: The number of field descriptions contained in the fields array
1020 * Add a new set of fields to a synthetic event cmd object. The event
1021 * fields that will be defined for the event should be passed in as an
1022 * array of struct synth_field_desc, and the number of elements in the
1023 * array passed in as n_fields. Field ordering will retain the
1024 * ordering given in the fields array.
1026 * See synth_field_size() for available types. If field_name contains
1027 * [n] the field is considered to be an array.
1029 * Return: 0 if successful, error otherwise.
1031 int synth_event_add_fields(struct dynevent_cmd *cmd,
1032 struct synth_field_desc *fields,
1033 unsigned int n_fields)
1038 for (i = 0; i < n_fields; i++) {
1039 if (fields[i].type == NULL || fields[i].name == NULL) {
1044 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1051 EXPORT_SYMBOL_GPL(synth_event_add_fields);
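/*
 * Adding several fields at once from a hypothetical description array,
 * again assuming a command started as in the sketches above:
 *
 *	static struct synth_field_desc extra_fields[] = {
 *		{ .type = "unsigned int",	.name = "irq" },
 *		{ .type = "char[16]",		.name = "comm" },
 *	};
 *
 *	ret = synth_event_add_fields(&cmd, extra_fields,
 *				     ARRAY_SIZE(extra_fields));
 */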
1054 * __synth_event_gen_cmd_start - Start a synthetic event command from arg list
1055 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1056 * @name: The name of the synthetic event
1057 * @mod: The module creating the event, NULL if not created from a module
1058 * @args: Variable number of arg (pairs), one pair for each field
1060 * NOTE: Users normally won't want to call this function directly, but
1061 * rather use the synth_event_gen_cmd_start() wrapper, which
1062 * automatically adds a NULL to the end of the arg list. If this
1063 * function is used directly, make sure the last arg in the variable arg list is NULL.
1066 * Generate a synthetic event command to be executed by
1067 * synth_event_gen_cmd_end(). This function can be used to generate
1068 * the complete command or only the first part of it; in the latter
1069 * case, synth_event_add_field(), synth_event_add_field_str(), or
1070 * synth_event_add_fields() can be used to add more fields following this.
1073 * There should be an even number of variable args, each pair consisting
1074 * of a type followed by a field name.
1076 * See synth_field_size() for available types. If field_name contains
1077 * [n] the field is considered to be an array.
1079 * Return: 0 if successful, error otherwise.
1081 int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd, const char *name,
1082 struct module *mod, ...)
1084 struct dynevent_arg arg;
1088 cmd->event_name = name;
1089 cmd->private_data = mod;
1091 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1094 dynevent_arg_init(&arg, 0);
1096 ret = dynevent_arg_add(cmd, &arg, NULL);
1100 va_start(args, mod);
1102 const char *type, *name;
1104 type = va_arg(args, const char *);
1107 name = va_arg(args, const char *);
1111 if (++cmd->n_fields > SYNTH_FIELDS_MAX) {
1116 ret = synth_event_add_field(cmd, type, name);
1124 EXPORT_SYMBOL_GPL(__synth_event_gen_cmd_start);
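/*
 * Callers normally use the synth_event_gen_cmd_start() wrapper, which
 * appends the terminating NULL. A sketch with hypothetical fields,
 * assuming a cmd initialized as in the earlier sketch:
 *
 *	ret = synth_event_gen_cmd_start(&cmd, "myevent", THIS_MODULE,
 *					"pid_t", "pid",
 *					"u64", "lat_ns",
 *					"char[16]", "comm");
 *	if (!ret)
 *		ret = synth_event_gen_cmd_end(&cmd);
 */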
1127 * synth_event_gen_cmd_array_start - Start synthetic event command from an array
1128 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1129 * @name: The name of the synthetic event
1130 * @fields: An array of type/name field descriptions
1131 * @n_fields: The number of field descriptions contained in the fields array
1133 * Generate a synthetic event command to be executed by
1134 * synth_event_gen_cmd_end(). This function can be used to generate
1135 * the complete command or only the first part of it; in the latter
1136 * case, synth_event_add_field(), synth_event_add_field_str(), or
1137 * synth_event_add_fields() can be used to add more fields following this.
1140 * The event fields that will be defined for the event should be
1141 * passed in as an array of struct synth_field_desc, and the number of
1142 * elements in the array passed in as n_fields. Field ordering will
1143 * retain the ordering given in the fields array.
1145 * See synth_field_size() for available types. If field_name contains
1146 * [n] the field is considered to be an array.
1148 * Return: 0 if successful, error otherwise.
1150 int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd, const char *name,
1152 struct synth_field_desc *fields,
1153 unsigned int n_fields)
1155 struct dynevent_arg arg;
1159 cmd->event_name = name;
1160 cmd->private_data = mod;
1162 if (cmd->type != DYNEVENT_TYPE_SYNTH)
1165 if (n_fields > SYNTH_FIELDS_MAX)
1168 dynevent_arg_init(&arg, 0);
1170 ret = dynevent_arg_add(cmd, &arg, NULL);
1174 for (i = 0; i < n_fields; i++) {
1175 if (fields[i].type == NULL || fields[i].name == NULL)
1178 ret = synth_event_add_field(cmd, fields[i].type, fields[i].name);
1185 EXPORT_SYMBOL_GPL(synth_event_gen_cmd_array_start);
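/*
 * The array-based equivalent of the sketch above, with hypothetical
 * fields; the command is still finalized with synth_event_gen_cmd_end():
 *
 *	static struct synth_field_desc myevent_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	ret = synth_event_gen_cmd_array_start(&cmd, "myevent", THIS_MODULE,
 *					      myevent_fields,
 *					      ARRAY_SIZE(myevent_fields));
 */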
1187 static int __create_synth_event(const char *name, const char *raw_fields)
1189 char **argv, *field_str, *tmp_fields, *saved_fields = NULL;
1190 struct synth_field *field, *fields[SYNTH_FIELDS_MAX];
1191 int consumed, cmd_version = 1, n_fields_this_loop;
1192 int i, argc, n_fields = 0, ret = 0;
1193 struct synth_event *event = NULL;
1197 * - Add synthetic event: <event_name> field[;field] ...
1198 * - Remove synthetic event: !<event_name> field[;field] ...
1199 * where 'field' = type field_name
1202 if (name[0] == '\0') {
1203 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1207 if (!is_good_name(name)) {
1208 synth_err(SYNTH_ERR_BAD_NAME, errpos(name));
1212 mutex_lock(&event_mutex);
1214 event = find_synth_event(name);
1216 synth_err(SYNTH_ERR_EVENT_EXISTS, errpos(name));
1221 tmp_fields = saved_fields = kstrdup(raw_fields, GFP_KERNEL);
1227 while ((field_str = strsep(&tmp_fields, ";")) != NULL) {
1228 argv = argv_split(GFP_KERNEL, field_str, &argc);
1239 n_fields_this_loop = 0;
1241 while (argc > consumed) {
1244 field = parse_synth_field(argc - consumed,
1245 argv + consumed, &consumed,
1247 if (IS_ERR(field)) {
1248 ret = PTR_ERR(field);
1253 * Track the highest version of any field we
1254 * found in the command.
1256 if (field_version > cmd_version)
1257 cmd_version = field_version;
1260 * Now sort out what is and isn't valid for
1261 * each supported version.
1263 * If we see more than 1 field per loop, it
1264 * means we have multiple fields between
1265 * semicolons, and that's something we no
1266 * longer support in a version 2 or greater
1269 if (cmd_version > 1 && n_fields_this_loop >= 1) {
1270 synth_err(SYNTH_ERR_INVALID_CMD, errpos(field_str));
1275 fields[n_fields++] = field;
1276 if (n_fields == SYNTH_FIELDS_MAX) {
1277 synth_err(SYNTH_ERR_TOO_MANY_FIELDS, 0);
1282 n_fields_this_loop++;
1286 if (consumed < argc) {
1287 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1294 if (n_fields == 0) {
1295 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1300 event = alloc_synth_event(name, n_fields, fields);
1301 if (IS_ERR(event)) {
1302 ret = PTR_ERR(event);
1306 ret = register_synth_event(event);
1308 dyn_event_add(&event->devent, &event->call);
1310 free_synth_event(event);
1312 mutex_unlock(&event_mutex);
1314 kfree(saved_fields);
1320 for (i = 0; i < n_fields; i++)
1321 free_synth_field(fields[i]);
1327 * synth_event_create - Create a new synthetic event
1328 * @name: The name of the new synthetic event
1329 * @fields: An array of type/name field descriptions
1330 * @n_fields: The number of field descriptions contained in the fields array
1331 * @mod: The module creating the event, NULL if not created from a module
1333 * Create a new synthetic event with the given name under the
1334 * trace/events/synthetic/ directory. The event fields that will be
1335 * defined for the event should be passed in as an array of struct
1336 * synth_field_desc, and the number of elements in the array passed in as
1337 * n_fields. Field ordering will retain the ordering given in the fields array.
1340 * If the new synthetic event is being created from a module, the mod
1341 * param must be non-NULL. This will ensure that the trace buffer
1342 * won't contain unreadable events.
1344 * The new synth event should be deleted using the synth_event_delete()
1345 * function. The new synthetic event can be generated from modules or
1346 * other kernel code using synth_event_trace() and related functions.
1348 * Return: 0 if successful, error otherwise.
1350 int synth_event_create(const char *name, struct synth_field_desc *fields,
1351 unsigned int n_fields, struct module *mod)
1353 struct dynevent_cmd cmd;
1357 buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
1361 synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
1363 ret = synth_event_gen_cmd_array_start(&cmd, name, mod,
1368 ret = synth_event_gen_cmd_end(&cmd);
1374 EXPORT_SYMBOL_GPL(synth_event_create);
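/*
 * A one-call sketch using a hypothetical field description array; this
 * wraps the command generation done piecemeal in the sketches above:
 *
 *	static struct synth_field_desc myevent_fields[] = {
 *		{ .type = "pid_t",	.name = "pid" },
 *		{ .type = "u64",	.name = "lat_ns" },
 *	};
 *
 *	ret = synth_event_create("myevent", myevent_fields,
 *				 ARRAY_SIZE(myevent_fields), THIS_MODULE);
 */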
1376 static int destroy_synth_event(struct synth_event *se)
1383 if (trace_event_dyn_busy(&se->call))
1386 ret = unregister_synth_event(se);
1388 dyn_event_remove(&se->devent);
1389 free_synth_event(se);
1396 * synth_event_delete - Delete a synthetic event
1397 * @event_name: The name of the synthetic event to delete
1399 * Delete a synthetic event that was created with synth_event_create().
1401 * Return: 0 if successful, error otherwise.
1403 int synth_event_delete(const char *event_name)
1405 struct synth_event *se = NULL;
1406 struct module *mod = NULL;
1409 mutex_lock(&event_mutex);
1410 se = find_synth_event(event_name);
1413 ret = destroy_synth_event(se);
1415 mutex_unlock(&event_mutex);
1418 mutex_lock(&trace_types_lock);
1420 * It is safest to reset the ring buffer if the module
1421 * being unloaded registered any events that were
1422 * used. The only worry is if a new module gets
1423 * loaded, and takes on the same id as the events of
1424 * this module. When printing out the buffer, traced
1425 * events left over from this module may be passed to
1426 * the new module events and unexpected results may occur.
1429 tracing_reset_all_online_cpus();
1430 mutex_unlock(&trace_types_lock);
1435 EXPORT_SYMBOL_GPL(synth_event_delete);
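/*
 * Removing the hypothetical event from the sketches above, e.g. from a
 * module's exit path once nothing references it:
 *
 *	ret = synth_event_delete("myevent");
 */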
1437 static int check_command(const char *raw_command)
1439 char **argv = NULL, *cmd, *saved_cmd, *name_and_field;
1442 cmd = saved_cmd = kstrdup(raw_command, GFP_KERNEL);
1446 name_and_field = strsep(&cmd, ";");
1447 if (!name_and_field) {
1452 if (name_and_field[0] == '!')
1455 argv = argv_split(GFP_KERNEL, name_and_field, &argc);
1470 static int create_or_delete_synth_event(const char *raw_command)
1472 char *name = NULL, *fields, *p;
1475 raw_command = skip_spaces(raw_command);
1476 if (raw_command[0] == '\0')
1479 last_cmd_set(raw_command);
1481 ret = check_command(raw_command);
1483 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1487 p = strpbrk(raw_command, " \t");
1488 if (!p && raw_command[0] != '!') {
1489 synth_err(SYNTH_ERR_INVALID_CMD, 0);
1494 name = kmemdup_nul(raw_command, p ? p - raw_command : strlen(raw_command), GFP_KERNEL);
1498 if (name[0] == '!') {
1499 ret = synth_event_delete(name + 1);
1503 fields = skip_spaces(p);
1505 ret = __create_synth_event(name, fields);
1512 static int synth_event_run_command(struct dynevent_cmd *cmd)
1514 struct synth_event *se;
1517 ret = create_or_delete_synth_event(cmd->seq.buffer);
1521 se = find_synth_event(cmd->event_name);
1525 se->mod = cmd->private_data;
1531 * synth_event_cmd_init - Initialize a synthetic event command object
1532 * @cmd: A pointer to the dynevent_cmd struct representing the new event
1533 * @buf: A pointer to the buffer used to build the command
1534 * @maxlen: The length of the buffer passed in @buf
1536 * Initialize a synthetic event command object. Use this before
1537 * calling any of the other dynevent_cmd functions.
1539 void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
1541 dynevent_cmd_init(cmd, buf, maxlen, DYNEVENT_TYPE_SYNTH,
1542 synth_event_run_command);
1544 EXPORT_SYMBOL_GPL(synth_event_cmd_init);
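/*
 * The caller owns the command buffer; a typical setup, as used in the
 * sketches above:
 *
 *	buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);
 */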
1547 __synth_event_trace_init(struct trace_event_file *file,
1548 struct synth_event_trace_state *trace_state)
1552 memset(trace_state, '\0', sizeof(*trace_state));
1555 * Normal event tracing doesn't get called at all unless the
1556 * ENABLED bit is set (which attaches the probe thus allowing
1557 * this code to be called, etc). Because this is called
1558 * directly by the user, we don't have that but we still need
1559 * to honor not logging when disabled. For the iterated
1560 * trace case, we save the enabled state upon start and just
1561 * ignore the following data calls.
1563 if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
1564 trace_trigger_soft_disabled(file)) {
1565 trace_state->disabled = true;
1570 trace_state->event = file->event_call->data;
1576 __synth_event_trace_start(struct trace_event_file *file,
1577 struct synth_event_trace_state *trace_state,
1578 int dynamic_fields_size)
1580 int entry_size, fields_size = 0;
1583 fields_size = trace_state->event->n_u64 * sizeof(u64);
1584 fields_size += dynamic_fields_size;
1587 * Avoid ring buffer recursion detection, as this event
1588 * is being performed within another event.
1590 trace_state->buffer = file->tr->array_buffer.buffer;
1591 ring_buffer_nest_start(trace_state->buffer);
1593 entry_size = sizeof(*trace_state->entry) + fields_size;
1594 trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
1597 if (!trace_state->entry) {
1598 ring_buffer_nest_end(trace_state->buffer);
1606 __synth_event_trace_end(struct synth_event_trace_state *trace_state)
1608 trace_event_buffer_commit(&trace_state->fbuffer);
1610 ring_buffer_nest_end(trace_state->buffer);
1614 * synth_event_trace - Trace a synthetic event
1615 * @file: The trace_event_file representing the synthetic event
1616 * @n_vals: The number of values in vals
1617 * @args: Variable number of args containing the event values
1619 * Trace a synthetic event using the values passed in the variable argument list.
1622 * The argument list should be a list of 'n_vals' u64 values. The number
1623 * of vals must match the number of fields in the synthetic event, and
1624 * must be in the same order as the synthetic event fields.
1626 * All vals should be cast to u64, and string vals are just pointers
1627 * to strings, cast to u64. Strings will be copied into space
1628 * reserved in the event for the string, using these pointers.
1630 * Return: 0 on success, err otherwise.
1632 int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
1634 unsigned int i, n_u64, len, data_size = 0;
1635 struct synth_event_trace_state state;
1639 ret = __synth_event_trace_init(file, &state);
1642 ret = 0; /* just disabled, not really an error */
1646 if (state.event->n_dynamic_fields) {
1647 va_start(args, n_vals);
1649 for (i = 0; i < state.event->n_fields; i++) {
1650 u64 val = va_arg(args, u64);
1652 if (state.event->fields[i]->is_string &&
1653 state.event->fields[i]->is_dynamic) {
1654 char *str_val = (char *)(long)val;
1656 data_size += strlen(str_val) + 1;
1663 ret = __synth_event_trace_start(file, &state, data_size);
1667 if (n_vals != state.event->n_fields) {
1674 va_start(args, n_vals);
1675 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1678 val = va_arg(args, u64);
1680 if (state.event->fields[i]->is_string) {
1681 char *str_val = (char *)(long)val;
1683 len = trace_string(state.entry, state.event, str_val,
1684 state.event->fields[i]->is_dynamic,
1686 data_size += len; /* only dynamic string increments */
1688 struct synth_field *field = state.event->fields[i];
1690 switch (field->size) {
1692 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1696 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1700 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1704 state.entry->fields[n_u64] = val;
1712 __synth_event_trace_end(&state);
1716 EXPORT_SYMBOL_GPL(synth_event_trace);
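/*
 * A sketch of tracing the hypothetical two-field "myevent" defined
 * above; every value is passed as a u64, in field order:
 *
 *	struct trace_event_file *file;
 *
 *	file = trace_get_event_file(NULL, "synthetic", "myevent");
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 *
 *	ret = synth_event_trace(file, 2,		// n_vals
 *				(u64)current->pid,	// pid
 *				(u64)1000);		// lat_ns
 *	trace_put_event_file(file);
 */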
1719 * synth_event_trace_array - Trace a synthetic event from an array
1720 * @file: The trace_event_file representing the synthetic event
1721 * @vals: Array of values
1722 * @n_vals: The number of values in vals
1724 * Trace a synthetic event using the values passed in as 'vals'.
1726 * The 'vals' array is just an array of 'n_vals' u64. The number of
1727 * vals must match the number of fields in the synthetic event, and
1728 * must be in the same order as the synthetic event fields.
1730 * All vals should be cast to u64, and string vals are just pointers
1731 * to strings, cast to u64. Strings will be copied into space
1732 * reserved in the event for the string, using these pointers.
1734 * Return: 0 on success, err otherwise.
1736 int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
1737 unsigned int n_vals)
1739 unsigned int i, n_u64, field_pos, len, data_size = 0;
1740 struct synth_event_trace_state state;
1744 ret = __synth_event_trace_init(file, &state);
1747 ret = 0; /* just disabled, not really an error */
1751 if (state.event->n_dynamic_fields) {
1752 for (i = 0; i < state.event->n_dynamic_fields; i++) {
1753 field_pos = state.event->dynamic_fields[i]->field_pos;
1754 str_val = (char *)(long)vals[field_pos];
1755 len = strlen(str_val) + 1;
1760 ret = __synth_event_trace_start(file, &state, data_size);
1764 if (n_vals != state.event->n_fields) {
1771 for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
1772 if (state.event->fields[i]->is_string) {
1773 char *str_val = (char *)(long)vals[i];
1775 len = trace_string(state.entry, state.event, str_val,
1776 state.event->fields[i]->is_dynamic,
1778 data_size += len; /* only dynamic string increments */
1780 struct synth_field *field = state.event->fields[i];
1783 switch (field->size) {
1785 *(u8 *)&state.entry->fields[n_u64] = (u8)val;
1789 *(u16 *)&state.entry->fields[n_u64] = (u16)val;
1793 *(u32 *)&state.entry->fields[n_u64] = (u32)val;
1797 state.entry->fields[n_u64] = val;
1804 __synth_event_trace_end(&state);
1808 EXPORT_SYMBOL_GPL(synth_event_trace_array);
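/*
 * The array form of the same trace, reusing the event file looked up in
 * the sketch above; vals[] entries must be in field order and, for
 * strings, hold the string pointer cast to u64:
 *
 *	u64 vals[2];
 *
 *	vals[0] = (u64)current->pid;	// pid
 *	vals[1] = 1000;			// lat_ns
 *	ret = synth_event_trace_array(file, vals, ARRAY_SIZE(vals));
 */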
1811 * synth_event_trace_start - Start piecewise synthetic event trace
1812 * @file: The trace_event_file representing the synthetic event
1813 * @trace_state: A pointer to object tracking the piecewise trace state
1815 * Start the trace of a synthetic event field-by-field rather than all at once.
1818 * This function 'opens' an event trace, which means space is reserved
1819 * for the event in the trace buffer, after which the event's
1820 * individual field values can be set through either
1821 * synth_event_add_next_val() or synth_event_add_val().
1823 * A pointer to a trace_state object is passed in, which will keep
1824 * track of the current event trace state until the event trace is
1825 * closed (and the event finally traced) using
1826 * synth_event_trace_end().
1828 * Note that synth_event_trace_end() must be called after all values
1829 * have been added for each event trace, regardless of whether adding
1830 * all field values succeeded or not.
1832 * Note also that for a given event trace, all fields must be added
1833 * using either synth_event_add_next_val() or synth_event_add_val()
1834 * but not both together or interleaved.
1836 * Return: 0 on success, err otherwise.
1838 int synth_event_trace_start(struct trace_event_file *file,
1839 struct synth_event_trace_state *trace_state)
1846 ret = __synth_event_trace_init(file, trace_state);
1849 ret = 0; /* just disabled, not really an error */
1853 if (trace_state->event->n_dynamic_fields)
1856 ret = __synth_event_trace_start(file, trace_state, 0);
1860 EXPORT_SYMBOL_GPL(synth_event_trace_start);
1862 static int __synth_event_add_val(const char *field_name, u64 val,
1863 struct synth_event_trace_state *trace_state)
1865 struct synth_field *field = NULL;
1866 struct synth_trace_event *entry;
1867 struct synth_event *event;
1875 /* can't mix synth_event_add_next_val() with synth_event_add_val() */
1877 if (trace_state->add_next) {
1881 trace_state->add_name = true;
1883 if (trace_state->add_name) {
1887 trace_state->add_next = true;
1890 if (trace_state->disabled)
1893 event = trace_state->event;
1894 if (trace_state->add_name) {
1895 for (i = 0; i < event->n_fields; i++) {
1896 field = event->fields[i];
1897 if (strcmp(field->name, field_name) == 0)
1905 if (trace_state->cur_field >= event->n_fields) {
1909 field = event->fields[trace_state->cur_field++];
1912 entry = trace_state->entry;
1913 if (field->is_string) {
1914 char *str_val = (char *)(long)val;
1917 if (field->is_dynamic) { /* add_val can't do dynamic strings */
1927 str_field = (char *)&entry->fields[field->offset];
1928 strscpy(str_field, str_val, STR_VAR_LEN_MAX);
1930 switch (field->size) {
1932 *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
1936 *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
1940 *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
1944 trace_state->entry->fields[field->offset] = val;
1953 * synth_event_add_next_val - Add the next field's value to an open synth trace
1954 * @val: The value to set the next field to
1955 * @trace_state: A pointer to object tracking the piecewise trace state
1957 * Set the value of the next field in an event that's been opened by
1958 * synth_event_trace_start().
1960 * The val param should be the value cast to u64. If the value points
1961 * to a string, the val param should be a char * cast to u64.
1963 * This function assumes all the fields in an event are to be set one
1964 * after another - successive calls to this function are made, one for
1965 * each field, in the order of the fields in the event, until all
1966 * fields have been set. If you'd rather set each field individually
1967 * without regard to ordering, synth_event_add_val() can be used instead.
1970 * Note however that synth_event_add_next_val() and
1971 * synth_event_add_val() can't be intermixed for a given event trace -
1972 * one or the other but not both can be used at the same time.
1974 * Note also that synth_event_trace_end() must be called after all
1975 * values have been added for each event trace, regardless of whether
1976 * adding all field values succeeded or not.
1978 * Return: 0 on success, err otherwise.
1980 int synth_event_add_next_val(u64 val,
1981 struct synth_event_trace_state *trace_state)
1983 return __synth_event_add_val(NULL, val, trace_state);
1985 EXPORT_SYMBOL_GPL(synth_event_add_next_val);
1988 * synth_event_add_val - Add a named field's value to an open synth trace
1989 * @field_name: The name of the synthetic event field value to set
1990 * @val: The value to set the named field to
1991 * @trace_state: A pointer to object tracking the piecewise trace state
1993 * Set the value of the named field in an event that's been opened by
1994 * synth_event_trace_start().
1996 * The val param should be the value cast to u64. If the value points
1997 * to a string, the val param should be a char * cast to u64.
1999 * This function looks up the field name, and if found, sets the field
2000 * to the specified value. This lookup makes this function more
2001 * expensive than synth_event_add_next_val(), so use that or the
2002 * non-piecewise synth_event_trace() instead if efficiency is more important.
2005 * Note however that synth_event_add_next_val() and
2006 * synth_event_add_val() can't be intermixed for a given event trace -
2007 * one or the other but not both can be used at the same time.
2009 * Note also that synth_event_trace_end() must be called after all
2010 * values have been added for each event trace, regardless of whether
2011 * adding all field values succeeded or not.
2013 * Return: 0 on success, err otherwise.
2015 int synth_event_add_val(const char *field_name, u64 val,
2016 struct synth_event_trace_state *trace_state)
2018 return __synth_event_add_val(field_name, val, trace_state);
2020 EXPORT_SYMBOL_GPL(synth_event_add_val);
2023 * synth_event_trace_end - End piecewise synthetic event trace
2024 * @trace_state: A pointer to object tracking the piecewise trace state
2026 * End the trace of a synthetic event opened by
2027 * synth_event_trace_start().
2029 * This function 'closes' an event trace, which basically means that
2030 * it commits the reserved event and cleans up other loose ends.
2032 * A pointer to a trace_state object is passed in, which will keep
2033 * track of the current event trace state opened with
2034 * synth_event_trace_start().
2036 * Note that this function must be called after all values have been
2037 * added for each event trace, regardless of whether adding all field
2038 * values succeeded or not.
2040 * Return: 0 on success, err otherwise.
2042 int synth_event_trace_end(struct synth_event_trace_state *trace_state)
2047 __synth_event_trace_end(trace_state);
2051 EXPORT_SYMBOL_GPL(synth_event_trace_end);
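/*
 * A piecewise sketch for the hypothetical "myevent", reusing the event
 * file from the earlier sketch: open the trace, add each value (in
 * field order here; synth_event_add_val() could name the fields
 * instead, but the two styles can't be mixed), then always close it:
 *
 *	struct synth_event_trace_state state;
 *
 *	ret = synth_event_trace_start(file, &state);
 *	if (ret)
 *		return ret;
 *
 *	ret = synth_event_add_next_val((u64)current->pid, &state);
 *	if (!ret)
 *		ret = synth_event_add_next_val(1000, &state);
 *
 *	synth_event_trace_end(&state);
 */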
2053 static int create_synth_event(const char *raw_command)
2059 raw_command = skip_spaces(raw_command);
2060 if (raw_command[0] == '\0')
2063 last_cmd_set(raw_command);
2067 /* Don't try to process if not our system */
2068 if (name[0] != 's' || name[1] != ':')
2072 p = strpbrk(raw_command, " \t");
2074 synth_err(SYNTH_ERR_INVALID_CMD, 0);
2078 fields = skip_spaces(p);
2080 /* This interface accepts a group name prefix */
2081 if (strchr(name, '/')) {
2082 len = str_has_prefix(name, SYNTH_SYSTEM "/");
2084 synth_err(SYNTH_ERR_INVALID_DYN_CMD, 0);
2090 len = name - raw_command;
2092 ret = check_command(raw_command + len);
2094 synth_err(SYNTH_ERR_INVALID_CMD, 0);
2098 name = kmemdup_nul(raw_command + len, p - raw_command - len, GFP_KERNEL);
2102 ret = __create_synth_event(name, fields);
2109 static int synth_event_release(struct dyn_event *ev)
2111 struct synth_event *event = to_synth_event(ev);
2117 if (trace_event_dyn_busy(&event->call))
2120 ret = unregister_synth_event(event);
2124 dyn_event_remove(ev);
2125 free_synth_event(event);
2129 static int __synth_event_show(struct seq_file *m, struct synth_event *event)
2131 struct synth_field *field;
2135 seq_printf(m, "%s\t", event->name);
2137 for (i = 0; i < event->n_fields; i++) {
2138 field = event->fields[i];
2141 t = strstr(type, "__data_loc");
2142 if (t) { /* __data_loc belongs in format but not event desc */
2143 t += sizeof("__data_loc");
2147 /* parameter values */
2148 seq_printf(m, "%s %s%s", type, field->name,
2149 i == event->n_fields - 1 ? "" : "; ");
2157 static int synth_event_show(struct seq_file *m, struct dyn_event *ev)
2159 struct synth_event *event = to_synth_event(ev);
2161 seq_printf(m, "s:%s/", event->class.system);
2163 return __synth_event_show(m, event);
2166 static int synth_events_seq_show(struct seq_file *m, void *v)
2168 struct dyn_event *ev = v;
2170 if (!is_synth_event(ev))
2173 return __synth_event_show(m, to_synth_event(ev));
2176 static const struct seq_operations synth_events_seq_op = {
2177 .start = dyn_event_seq_start,
2178 .next = dyn_event_seq_next,
2179 .stop = dyn_event_seq_stop,
2180 .show = synth_events_seq_show,
2183 static int synth_events_open(struct inode *inode, struct file *file)
2187 ret = security_locked_down(LOCKDOWN_TRACEFS);
2191 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
2192 ret = dyn_events_release_all(&synth_event_ops);
2197 return seq_open(file, &synth_events_seq_op);
2200 static ssize_t synth_events_write(struct file *file,
2201 const char __user *buffer,
2202 size_t count, loff_t *ppos)
2204 return trace_parse_run_command(file, buffer, count, ppos,
2205 create_or_delete_synth_event);
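/*
 * For reference, the strings handed to create_or_delete_synth_event()
 * here are the same commands user space writes to the synthetic_events
 * file, e.g. (hypothetical event):
 *
 *	myevent u64 lat_ns; char[16] comm
 *	!myevent
 */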
2208 static const struct file_operations synth_events_fops = {
2209 .open = synth_events_open,
2210 .write = synth_events_write,
2212 .llseek = seq_lseek,
2213 .release = seq_release,
2217 * Register dynevent at core_initcall. This allows the kernel to set up
2218 * synthetic events in postcore_initcall without tracefs.
2220 static __init int trace_events_synth_init_early(void)
2224 err = dyn_event_register(&synth_event_ops);
2226 pr_warn("Could not register synth_event_ops\n");
2230 core_initcall(trace_events_synth_init_early);
2232 static __init int trace_events_synth_init(void)
2234 struct dentry *entry = NULL;
2236 err = tracing_init_dentry();
2240 entry = tracefs_create_file("synthetic_events", TRACE_MODE_WRITE,
2241 NULL, NULL, &synth_events_fops);
2249 pr_warn("Could not create tracefs 'synthetic_events' entry\n");
2254 fs_initcall(trace_events_synth_init);