// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
/* Reminder to move to uapi when everything works */
#ifdef CONFIG_COMPILE_TEST
#include <linux/user_events.h>
#else
#include <uapi/linux/user_events.h>
#endif
#include "trace_dynevent.h"
#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2
/*
 * Limits how many trace_event calls user processes can create:
 * Must be a power-of-two multiple of PAGE_SIZE.
 */
#define MAX_PAGE_ORDER 0
#define MAX_PAGES (1 << MAX_PAGE_ORDER)
#define MAX_EVENTS (MAX_PAGES * PAGE_SIZE)
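
/*
 * A worked example of the limit math above (assuming the common 4 KiB
 * PAGE_SIZE): MAX_PAGE_ORDER 0 gives a single status page, so
 * MAX_EVENTS = 1 * 4096, and each registered event owns exactly one
 * status byte within register_page_data.
 */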
/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024
#define MAX_FIELD_ARG_NAME 256
static char *register_page_data;

static DEFINE_MUTEX(reg_mutex);
static DEFINE_HASHTABLE(register_table, 4);
static DECLARE_BITMAP(page_bitmap, MAX_EVENTS);
/*
 * Stores per-event properties. As users register events within a file,
 * a user_event might be created if it does not already exist. These are
 * globally used and their lifetime is tied to the refcnt member. They
 * cannot go away until the refcnt reaches zero.
 */
struct user_event {
	struct tracepoint tracepoint;
	struct trace_event_call call;
	struct trace_event_class class;
	struct dyn_event devent;
	struct hlist_node node;
	struct list_head fields;
	struct list_head validators;
	atomic_t refcnt;
	int index;
	int flags;
	int min_size;
};
/*
 * Stores per-file event references. As users register events within a
 * file, this structure is modified and freed via RCU. The lifetime of
 * this struct is tied to the lifetime of the file. These are not shared
 * and are only accessible by the file that created them.
 */
struct user_event_refs {
	struct rcu_head rcu;
	int count;
	struct user_event *events[];
};
#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head link;
	int offset;
	int flags;
};
typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(char *name, char *args, char *flags,
			    struct user_event **newuser);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}
static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	/* Disable page faults around the copy; a short copy signals a fault */
	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}
static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}
/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field and an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective; they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(char *raw_command, struct user_event **newuser)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(name, args, flags, newuser);
}
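
/*
 * Illustrative sketch only, not part of this file's logic: how a user
 * process might issue the register command above through the
 * DIAG_IOCSREG ioctl and struct user_reg provided by the included
 * <linux/user_events.h>. "data_fd" is a hypothetical descriptor opened
 * on tracefs user_events_data:
 *
 *	struct user_reg reg = { .size = sizeof(reg) };
 *
 *	reg.name_args = (__u64)(uintptr_t)"test char[20] msg;unsigned int id";
 *
 *	if (ioctl(data_fd, DIAG_IOCSREG, &reg) < 0)
 *		return -1; // registration failed
 *
 *	// reg.write_index and reg.status_index are now valid outputs
 */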
static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	unsigned int size;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}
190 static int user_field_size(const char *type)
192 /* long is not allowed from a user, since it's ambigious in size */
193 if (strcmp(type, "s64") == 0)
195 if (strcmp(type, "u64") == 0)
197 if (strcmp(type, "s32") == 0)
199 if (strcmp(type, "u32") == 0)
201 if (strcmp(type, "int") == 0)
203 if (strcmp(type, "unsigned int") == 0)
204 return sizeof(unsigned int);
205 if (strcmp(type, "s16") == 0)
207 if (strcmp(type, "u16") == 0)
209 if (strcmp(type, "short") == 0)
210 return sizeof(short);
211 if (strcmp(type, "unsigned short") == 0)
212 return sizeof(unsigned short);
213 if (strcmp(type, "s8") == 0)
215 if (strcmp(type, "u8") == 0)
217 if (strcmp(type, "char") == 0)
219 if (strcmp(type, "unsigned char") == 0)
220 return sizeof(unsigned char);
221 if (str_has_prefix(type, "char["))
222 return user_field_array_size(type);
223 if (str_has_prefix(type, "unsigned char["))
224 return user_field_array_size(type);
225 if (str_has_prefix(type, "__data_loc "))
227 if (str_has_prefix(type, "__rel_loc "))
230 /* Uknown basic type, error */
static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, link) {
		list_del(&validator->link);
		kfree(validator);
	}
}
static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}
static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != 0)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	list_add(&field->link, &user->fields);

	/*
	 * Minimum size required from user writes; this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}
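
/*
 * A worked example of the min_size math above: for a lone "u32 value"
 * field, offset starts at sizeof(struct trace_entry) and size is 4, so
 * min_size = (offset + 4) - sizeof(struct trace_entry) = 4. A write for
 * that event must therefore carry at least 4 payload bytes after the
 * write index.
 */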
/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}
static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}
static struct trace_event_fields user_event_fields_array[1];
static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%d";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%d";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%d";
	if (strstr(type, "char[") != 0)
		return "%s";

	/* Unknown (likely a struct); allowed, treat as 64-bit */
	return "%llu";
}
static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != 0;
}
#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}
#undef LEN_OR_ZERO
static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}
static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	/* Unsafe to try to decode user provided print_fmt, use hex */
	trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16,
				 1, iter->ent, iter->ent_size, true);

	return trace_handle_return(&iter->seq);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};
static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permissions to
	 * add/remove calls themselves to tracefs. We need to temporarily
	 * switch to root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}
static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);

	register_page_data[user->index] = 0;
	clear_bit(user->index, page_bitmap);
	hash_del(&user->node);

	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	return ret;
}
static struct user_event *find_user_event(char *name, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(register_table, user, node, key)
		if (!strcmp(EVENT_NAME(user), name)) {
			atomic_inc(&user->refcnt);
			return user;
		}

	return NULL;
}
static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}
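
/*
 * Layout note for the checks above: each __data_loc/__rel_loc field is a
 * single u32 "loc" word in the payload. The low 16 bits hold the data
 * offset (relative to the record for __data_loc, or to just past the loc
 * word for __rel_loc) and the high 16 bits hold the data length. For
 * example, loc == 0x000a0014 describes 10 bytes located at offset 20.
 */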
/*
 * Writes the user-supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry; entry + 1 is the data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(!copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}
#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user-supplied payload out to the perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif
/*
 * Update the register page that is shared between user processes.
 */
static void update_reg_page_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
				else if (probe_func == user_event_perf)
					status |= EVENT_STATUS_PERF;
#endif
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	register_page_data[user->index] = status;
}
/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->perf_probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    data);
		goto dec;

	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		break;
#endif
	}

	return ret;
inc:
	atomic_inc(&user->refcnt);
	update_reg_page_for(user);
	return 0;
dec:
	update_reg_page_for(user);
	atomic_dec(&user->refcnt);
	return 0;
}
static int user_event_create(const char *raw_command)
{
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL);

	if (!name)
		return -ENOMEM;

	mutex_lock(&reg_mutex);

	ret = user_event_parse_cmd(name, &user);

	if (!ret)
		atomic_dec(&user->refcnt);

	mutex_unlock(&reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}
static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field, *next;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}
static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return atomic_read(&user->refcnt) != 0;
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (atomic_read(&user->refcnt) != 0)
		return -EBUSY;

	return destroy_user_event(user);
}
static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name, *arg_name;
	int len, pos, i = *iout;
	bool colon = false, match = false;

	if (i >= argc)
		return false;

	len = MAX_FIELD_ARG_NAME;
	field_name = kmalloc(len, GFP_KERNEL);
	arg_name = kmalloc(len, GFP_KERNEL);

	if (!arg_name || !field_name)
		goto out;

	pos = 0;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(arg_name + pos, len - pos, " ");

		pos += snprintf(arg_name + pos, len - pos, argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			colon = true;
			break;
		}
	}

	pos = 0;

	pos += snprintf(field_name + pos, len - pos, field->type);
	pos += snprintf(field_name + pos, len - pos, " ");
	pos += snprintf(field_name + pos, len - pos, field->name);

	if (colon)
		pos += snprintf(field_name + pos, len - pos, ";");

	*iout = i;

	match = strcmp(arg_name, field_name) == 0;
out:
	kfree(arg_name);
	kfree(field_name);

	return match;
}
static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int i = 0;

	list_for_each_entry_safe_reverse(field, next, head, link)
		if (!user_field_match(field, argc, argv, &i))
			return false;

	if (i != argc)
		return false;

	return true;
}
static bool user_event_match(const char *system, const char *event,
			     int argc, const char **argv, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	bool match;

	match = strcmp(EVENT_NAME(user), event) == 0 &&
		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

	if (match && argc > 0)
		match = user_fields_match(user, argc, argv);

	return match;
}
static struct dyn_event_operations user_event_dops = {
	.create = user_event_create,
	.show = user_event_show,
	.is_busy = user_event_is_busy,
	.free = user_event_free,
	.match = user_event_match,
};
static int user_event_trace_register(struct user_event *user)
{
	int ret;

	ret = register_trace_event(&user->call.event);

	if (!ret)
		return -ENODEV;

	ret = user_event_set_call_visible(user, true);

	if (ret)
		unregister_trace_event(&user->call.event);

	return ret;
}
/*
 * Parses the event name, arguments and flags, then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(char *name, char *args, char *flags,
			    struct user_event **newuser)
{
	int ret;
	int index;
	u32 key;
	struct user_event *user;

	/* Prevent dyn_event from racing */
	mutex_lock(&event_mutex);
	user = find_user_event(name, &key);
	mutex_unlock(&event_mutex);

	if (user) {
		*newuser = user;
		/*
		 * Name is allocated by the caller; free it since the event
		 * already exists. The caller only frees on failure cases.
		 */
		kfree(name);
		return 0;
	}

	index = find_first_zero_bit(page_bitmap, MAX_EVENTS);

	if (index == MAX_EVENTS)
		return -EMFILE;

	user = kzalloc(sizeof(*user), GFP_KERNEL);

	if (!user)
		return -ENOMEM;

	INIT_LIST_HEAD(&user->class.fields);
	INIT_LIST_HEAD(&user->fields);
	INIT_LIST_HEAD(&user->validators);

	user->tracepoint.name = name;

	ret = user_event_parse_fields(user, args);

	if (ret)
		goto put_user;

	ret = user_event_create_print_fmt(user);

	if (ret)
		goto put_user;

	user->call.data = user;
	user->call.class = &user->class;
	user->call.name = name;
	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
	user->call.tp = &user->tracepoint;
	user->call.event.funcs = &user_event_funcs;

	user->class.system = USER_EVENTS_SYSTEM;
	user->class.fields_array = user_event_fields_array;
	user->class.get_fields = user_event_get_fields;
	user->class.reg = user_event_reg;
	user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
	user->class.perf_probe = user_event_perf;
#endif

	mutex_lock(&event_mutex);

	ret = user_event_trace_register(user);

	if (ret)
		goto put_user_lock;

	user->index = index;

	/* Ensure we track the ref */
	atomic_inc(&user->refcnt);

	dyn_event_init(&user->devent, &user_event_dops);
	dyn_event_add(&user->devent, &user->call);
	set_bit(user->index, page_bitmap);
	hash_add(register_table, &user->node, key);

	mutex_unlock(&event_mutex);

	*newuser = user;
	return 0;
put_user_lock:
	mutex_unlock(&event_mutex);
put_user:
	user_event_destroy_fields(user);
	user_event_destroy_validators(user);
	kfree(user);
	return ret;
}
/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(char *name)
{
	u32 key;
	int ret;
	struct user_event *user = find_user_event(name, &key);

	if (!user)
		return -ENOENT;

	/* Ensure we are the last ref */
	if (atomic_read(&user->refcnt) != 1) {
		ret = -EBUSY;
		goto put_ref;
	}

	ret = destroy_user_event(user);

	if (ret)
		goto put_ref;

	return ret;
put_ref:
	/* No longer have this ref */
	atomic_dec(&user->refcnt);

	return ret;
}
/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(file->private_data);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is opened.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	if (unlikely(i->count < user->min_size))
		return -EINVAL;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled disables after this check; however,
	 * we don't mind if a few events slip through in that window.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		struct iov_iter copy;
		void *tpdata;
		bool faulted;

		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
			return -EFAULT;

		faulted = false;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				copy = *i;
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, &copy, tpdata, &faulted);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		if (unlikely(faulted))
			return -EFAULT;
	}

	return ret;
}
static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	/* The buffer is the source of a write, so import with WRITE */
	if (unlikely(import_single_range(WRITE, (char *)ubuf, count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}
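
/*
 * Illustrative sketch only: the write format expected by the core path
 * above, assuming a hypothetical data_fd and the write_index returned
 * earlier by DIAG_IOCSREG. The payload must begin with the int write
 * index and supply at least min_size bytes of field data after it:
 *
 *	struct iovec io[2];
 *	int idx = reg.write_index;
 *	__u32 value = 42;
 *
 *	io[0].iov_base = &idx;
 *	io[0].iov_len = sizeof(idx);
 *	io[1].iov_base = &value;
 *	io[1].iov_len = sizeof(value);
 *
 *	writev(data_fd, io, 2);
 */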
static int user_events_ref_add(struct file *file, struct user_event *user)
{
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(file->private_data,
					 lockdep_is_held(&reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user;

	atomic_inc(&user->refcnt);

	rcu_assign_pointer(file->private_data, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}
static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	return copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
}
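
/*
 * Note on the size handling above: copy_struct_from_user() implements
 * the usual extensible-struct ABI. If user space passes a smaller size
 * than sizeof(*kreg), the remaining kernel fields are zero-filled; if it
 * passes a larger size, the call only succeeds when all trailing user
 * bytes are zero (otherwise -E2BIG), so newer user binaries that rely on
 * fields this kernel does not know about fail cleanly.
 */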
/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct file *file, unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	char *name;
	long ret;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name)) {
		ret = PTR_ERR(name);
		return ret;
	}

	ret = user_event_parse_cmd(name, &user);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(file, user);

	/* No longer need parse ref, ref_add either worked or not */
	atomic_dec(&user->refcnt);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	put_user((u32)ret, &ureg->write_index);
	put_user(user->index, &ureg->status_index);

	return 0;
}
/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct file *file, unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* event_mutex prevents dyn_event from racing */
	mutex_lock(&event_mutex);
	ret = delete_user_event(name);
	mutex_unlock(&event_mutex);

	kfree(name);

	return ret;
}
/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
			      unsigned long uarg)
{
	long ret = -ENOTTY;

	switch (cmd) {
	case DIAG_IOCSREG:
		mutex_lock(&reg_mutex);
		ret = user_events_ioctl_reg(file, uarg);
		mutex_unlock(&reg_mutex);
		break;

	case DIAG_IOCSDEL:
		mutex_lock(&reg_mutex);
		ret = user_events_ioctl_del(file, uarg);
		mutex_unlock(&reg_mutex);
		break;
	}

	return ret;
}
/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_refs *refs;
	struct user_event *user;
	int i;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&reg_mutex);

	refs = file->private_data;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end; it's tied to this file.
	 * The underlying user_events are ref counted, and cannot be freed.
	 * After this decrement, the user_events may be freed elsewhere.
	 */
	for (i = 0; i < refs->count; ++i) {
		user = refs->events[i];

		if (user)
			atomic_dec(&user->refcnt);
	}
out:
	file->private_data = NULL;

	mutex_unlock(&reg_mutex);

	kfree(refs);

	return 0;
}
static const struct file_operations user_data_fops = {
	.write = user_events_write,
	.write_iter = user_events_write_iter,
	.unlocked_ioctl	= user_events_ioctl,
	.release = user_events_release,
};
/*
 * Maps the shared page into the user process for checking if an event
 * is enabled.
 */
static int user_status_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	if (size != MAX_EVENTS)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start,
			       virt_to_phys(register_page_data) >> PAGE_SHIFT,
			       size, vm_get_page_prot(VM_READ));
}
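
/*
 * Illustrative sketch only: how a user process might consume this
 * mapping, assuming a hypothetical status_fd opened on tracefs
 * user_events_status, MAX_EVENTS spanning a single 4 KiB page as
 * configured above, and the status_index returned by DIAG_IOCSREG.
 * The page is read-only for user space; a nonzero byte means at least
 * one probe is attached:
 *
 *	char *status_page = mmap(NULL, 4096, PROT_READ, MAP_SHARED,
 *				 status_fd, 0);
 *
 *	if (status_page[reg.status_index])
 *		; // event enabled: build and write the payload
 */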
static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;

	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}
static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0, flags;

	mutex_lock(&reg_mutex);

	hash_for_each(register_table, i, user, node) {
		status = register_page_data[user->index];
		flags = user->flags;

		seq_printf(m, "%d:%s", user->index, EVENT_NAME(user));

		if (flags != 0 || status != 0)
			seq_puts(m, " #");

		if (status != 0) {
			seq_puts(m, " Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);
	seq_printf(m, "Max: %ld\n", MAX_EVENTS);

	return 0;
}
static const struct seq_operations user_seq_ops = {
	.start = user_seq_start,
	.next = user_seq_next,
	.stop = user_seq_stop,
	.show = user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	return seq_open(file, &user_seq_ops);
}

static const struct file_operations user_status_fops = {
	.open = user_status_open,
	.mmap = user_status_mmap,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	/* mmap with MAP_SHARED requires writable fd */
	emmap = tracefs_create_file("user_events_status", TRACE_MODE_WRITE,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}
static void set_page_reservations(bool set)
{
	int page;

	for (page = 0; page < MAX_PAGES; ++page) {
		void *addr = register_page_data + (PAGE_SIZE * page);

		if (set)
			SetPageReserved(virt_to_page(addr));
		else
			ClearPageReserved(virt_to_page(addr));
	}
}
static int __init trace_events_user_init(void)
{
	struct page *pages;
	int ret;

	/* Clear the bitmap, then reserve bit 0 (index 0 signals failures) */
	bitmap_zero(page_bitmap, MAX_EVENTS);
	set_bit(0, page_bitmap);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, MAX_PAGE_ORDER);

	if (!pages)
		return -ENOMEM;

	register_page_data = page_address(pages);

	set_page_reservations(true);

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		set_page_reservations(false);
		__free_pages(pages, MAX_PAGE_ORDER);
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	return 0;
}

fs_initcall(trace_events_user_init);