// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */

#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/io.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#include "trace.h"

#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2

/* Limit how long an event name plus args within the subsystem can be. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024

/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)

/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
	char			*system_name;
	struct hlist_node	node;
	struct mutex		reg_mutex;
	DECLARE_HASHTABLE(register_table, 8);
};

/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;

/*
 * Stores per-event properties. As users register events within a file,
 * a user_event might be created if it does not already exist. These are
 * globally used and their lifetime is tied to the refcnt member. These
 * cannot go away until the refcnt reaches one.
 */
struct user_event {
	struct user_event_group		*group;
	struct tracepoint		tracepoint;
	struct trace_event_call		call;
	struct trace_event_class	class;
	struct dyn_event		devent;
	struct hlist_node		node;
	struct list_head		fields;
	struct list_head		validators;
	refcount_t			refcnt;
	int				min_size;
	char				status;
};

/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
	struct list_head	link;
	struct user_event	*event;
	unsigned long		addr;

	/* Track enable bit, flags, etc. Aligned for bitops. */
	unsigned long		values;
};

/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Only duplicate the bit value */
#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
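
/*
 * For example, an enabler targeting bit 1 of a user-space enable value,
 * with an asynchronous fault currently outstanding, would have:
 *
 *	values = 0x41;	(ENABLE_VAL_FAULTING_BIT set, ENABLE_BIT() == 1)
 *
 * ENABLE_VAL_DUP_MASK ensures only the bit index, not the transient
 * faulting/freeing flags, is copied when an enabler is duplicated on fork.
 */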

/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
	struct work_struct		work;
	struct user_event_mm		*mm;
	struct user_event_enabler	*enabler;
	int				attempt;
};

static struct kmem_cache *fault_cache;

/* Global list of memory descriptors using user_events */
static LIST_HEAD(user_event_mms);
static DEFINE_SPINLOCK(user_event_mms_lock);

/*
 * Stores per-file event references. As users register events within a file,
 * this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
	struct rcu_head		rcu;
	int			count;
	struct user_event	*events[];
};

struct user_event_file_info {
	struct user_event_group	*group;
	struct user_event_refs	*refs;
};

#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
	struct list_head	link;
	int			offset;
	int			flags;
};

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
				   void *tpdata, bool *faulted);

static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);

static u32 user_event_key(char *name)
{
	return jhash(name, strlen(name), 0);
}

static void user_event_group_destroy(struct user_event_group *group)
{
	kfree(group->system_name);
	kfree(group);
}

static char *user_event_group_system_name(struct user_namespace *user_ns)
{
	char *system_name;
	int len = sizeof(USER_EVENTS_SYSTEM) + 1;

	if (user_ns != &init_user_ns) {
		/*
		 * Unexpected at this point:
		 * We only currently support init_user_ns.
		 * When we enable more, this will trigger a failure, so log it.
		 */
		pr_warn("user_events: Namespace other than init_user_ns!\n");
		return NULL;
	}

	system_name = kmalloc(len, GFP_KERNEL);

	if (!system_name)
		return NULL;

	snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

	return system_name;
}

static inline struct user_event_group
*user_event_group_from_user_ns(struct user_namespace *user_ns)
{
	if (user_ns == &init_user_ns)
		return init_group;

	return NULL;
}

static struct user_event_group *current_user_event_group(void)
{
	struct user_namespace *user_ns = current_user_ns();
	struct user_event_group *group = NULL;

	while (user_ns) {
		group = user_event_group_from_user_ns(user_ns);

		if (group)
			break;

		user_ns = user_ns->parent;
	}

	return group;
}

static struct user_event_group
*user_event_group_create(struct user_namespace *user_ns)
{
	struct user_event_group *group;

	group = kzalloc(sizeof(*group), GFP_KERNEL);

	if (!group)
		return NULL;

	group->system_name = user_event_group_system_name(user_ns);

	if (!group->system_name)
		goto error;

	mutex_init(&group->reg_mutex);
	hash_init(group->register_table);

	return group;
error:
	if (group)
		user_event_group_destroy(group);

	return NULL;
}

static void user_event_enabler_destroy(struct user_event_enabler *enabler)
{
	list_del_rcu(&enabler->link);

	/* No longer tracking the event via the enabler */
	refcount_dec(&enabler->event->refcnt);

	kfree(enabler);
}

static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
				  int attempt)
{
	bool unlocked;
	int ret;

	/*
	 * Normally this is low, ensure that it cannot be taken advantage of by
	 * bad user processes to cause excessive looping.
	 */
	if (attempt > 10)
		return -EFAULT;

	mmap_read_lock(mm->mm);

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0) {
		ret = -ENOENT;
		goto out;
	}

	ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
			       &unlocked);
out:
	mmap_read_unlock(mm->mm);

	return ret;
}

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt);
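
/*
 * Fault handling flow: when user_event_enabler_write() cannot pin the user
 * page (FOLL_NOFAULT), it queues a user_event_enabler_fault work item via
 * user_event_enabler_queue_fault(). The work below runs the fault-in from a
 * workqueue and then re-issues the write, unless the enabler was marked
 * freeing while the fault was outstanding.
 */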

static void user_event_enabler_fault_fixup(struct work_struct *work)
{
	struct user_event_enabler_fault *fault = container_of(
		work, struct user_event_enabler_fault, work);
	struct user_event_enabler *enabler = fault->enabler;
	struct user_event_mm *mm = fault->mm;
	unsigned long uaddr = enabler->addr;
	int attempt = fault->attempt;
	int ret;

	ret = user_event_mm_fault_in(mm, uaddr, attempt);

	if (ret && ret != -ENOENT) {
		struct user_event *user = enabler->event;

		pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
			mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
	}

	/* Prevent state changes from racing */
	mutex_lock(&event_mutex);

	/* User asked for enabler to be removed during fault */
	if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
		user_event_enabler_destroy(enabler);
		goto out;
	}

	/*
	 * If we managed to get the page, re-issue the write. We do not
	 * want to get into a possible infinite loop, which is why we only
	 * attempt again directly if the page came in. If we couldn't get
	 * the page here, then we will try again the next time the event is
	 * enabled/disabled.
	 */
	clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!ret) {
		mmap_read_lock(mm->mm);
		user_event_enabler_write(mm, enabler, true, &attempt);
		mmap_read_unlock(mm->mm);
	}
out:
	mutex_unlock(&event_mutex);

	/* In all cases we no longer need the mm or fault */
	user_event_mm_put(mm);
	kmem_cache_free(fault_cache, fault);
}

static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
					   struct user_event_enabler *enabler,
					   int attempt)
{
	struct user_event_enabler_fault *fault;

	fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

	if (!fault)
		return false;

	INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
	fault->mm = user_event_mm_get(mm);
	fault->enabler = enabler;
	fault->attempt = attempt;

	/* Don't try to queue in again while we have a pending fault */
	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

	if (!schedule_work(&fault->work)) {
		/* Allow another attempt later */
		clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

		user_event_mm_put(mm);
		kmem_cache_free(fault_cache, fault);

		return false;
	}

	return true;
}
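
/*
 * Writes the enable bit directly into the user process. The page is pinned
 * with FOLL_NOFAULT so this never sleeps on a fault while holding the locks
 * asserted below; when the page is not present, the update is retried via
 * the asynchronous fault-in path above (if fixup_fault is true).
 */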

static int user_event_enabler_write(struct user_event_mm *mm,
				    struct user_event_enabler *enabler,
				    bool fixup_fault, int *attempt)
{
	unsigned long uaddr = enabler->addr;
	unsigned long *ptr;
	struct page *page;
	void *kaddr;
	int ret;

	lockdep_assert_held(&event_mutex);
	mmap_assert_locked(mm->mm);

	*attempt += 1;

	/* Ensure MM has tasks, cannot use after exit_mm() */
	if (refcount_read(&mm->tasks) == 0)
		return -ENOENT;

	if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
		     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
		return -EBUSY;

	ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
				    &page, NULL, NULL);

	if (unlikely(ret <= 0)) {
		if (!fixup_fault)
			return -EFAULT;

		if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
			pr_warn("user_events: Unable to queue fault handler\n");

		return -EFAULT;
	}

	kaddr = kmap_local_page(page);
	ptr = kaddr + (uaddr & ~PAGE_MASK);

	/* Update bit atomically, user tracers must be atomic as well */
	if (enabler->event && enabler->event->status)
		set_bit(ENABLE_BIT(enabler), ptr);
	else
		clear_bit(ENABLE_BIT(enabler), ptr);

	kunmap_local(kaddr);
	unpin_user_pages_dirty_lock(&page, 1, true);

	return 0;
}

static bool user_event_enabler_exists(struct user_event_mm *mm,
				      unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler *enabler;
	struct user_event_enabler *next;

	list_for_each_entry_safe(enabler, next, &mm->enablers, link) {
		if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
			return true;
	}

	return false;
}

static void user_event_enabler_update(struct user_event *user)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *mm = user_event_mm_get_all(user);
	struct user_event_mm *next;
	int attempt;

	while (mm) {
		next = mm->next;
		mmap_read_lock(mm->mm);
		rcu_read_lock();

		list_for_each_entry_rcu(enabler, &mm->enablers, link) {
			if (enabler->event == user) {
				attempt = 0;
				user_event_enabler_write(mm, enabler, true, &attempt);
			}
		}

		rcu_read_unlock();
		mmap_read_unlock(mm->mm);
		user_event_mm_put(mm);
		mm = next;
	}
}

static bool user_event_enabler_dup(struct user_event_enabler *orig,
				   struct user_event_mm *mm)
{
	struct user_event_enabler *enabler;

	/* Skip pending frees */
	if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
		return true;

	enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

	if (!enabler)
		return false;

	enabler->event = orig->event;
	enabler->addr = orig->addr;

	/* Only dup part of value (ignore future flags, etc) */
	enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

	refcount_inc(&enabler->event->refcnt);
	list_add_rcu(&enabler->link, &mm->enablers);

	return true;
}

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
	refcount_inc(&mm->refcnt);

	return mm;
}

static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
	struct user_event_mm *found = NULL;
	struct user_event_enabler *enabler;
	struct user_event_mm *mm;

	/*
	 * We do not want to block fork/exec while enablements are being
	 * updated, so we use RCU to walk the current tasks that have used
	 * user_events ABI for 1 or more events. Each enabler found in each
	 * task that matches the event being updated has a write to reflect
	 * the kernel state back into the process. Waits/faults must not occur
	 * during this. So we scan the list under RCU for all the mm that have
	 * the event within it. This is needed because mm_read_lock() can wait.
	 * Each user mm returned has a ref inc to handle remove RCU races.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(mm, &user_event_mms, link)
		list_for_each_entry_rcu(enabler, &mm->enablers, link)
			if (enabler->event == user) {
				mm->next = found;
				found = user_event_mm_get(mm);
				break;
			}

	rcu_read_unlock();

	return found;
}

static struct user_event_mm *user_event_mm_create(struct task_struct *t)
{
	struct user_event_mm *user_mm;
	unsigned long flags;

	user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

	if (!user_mm)
		return NULL;

	user_mm->mm = t->mm;
	INIT_LIST_HEAD(&user_mm->enablers);
	refcount_set(&user_mm->refcnt, 1);
	refcount_set(&user_mm->tasks, 1);

	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_add_rcu(&user_mm->link, &user_event_mms);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	t->user_event_mm = user_mm;

	/*
	 * The lifetime of the memory descriptor can slightly outlast
	 * the task lifetime if a ref to the user_event_mm is taken
	 * between list_del_rcu() and call_rcu(). Therefore we need
	 * to take a reference to it to ensure it can live this long
	 * under this corner case. This can also occur in clones that
	 * outlast the parent.
	 */
	mmgrab(user_mm->mm);

	return user_mm;
}

static struct user_event_mm *current_user_event_mm(void)
{
	struct user_event_mm *user_mm = current->user_event_mm;

	if (user_mm)
		goto inc;

	user_mm = user_event_mm_create(current);

	if (!user_mm)
		goto error;
inc:
	refcount_inc(&user_mm->refcnt);
error:
	return user_mm;
}

static void user_event_mm_destroy(struct user_event_mm *mm)
{
	struct user_event_enabler *enabler, *next;

	list_for_each_entry_safe(enabler, next, &mm->enablers, link)
		user_event_enabler_destroy(enabler);

	mmdrop(mm->mm);
	kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
	if (mm && refcount_dec_and_test(&mm->refcnt))
		user_event_mm_destroy(mm);
}

static void delayed_user_event_mm_put(struct work_struct *work)
{
	struct user_event_mm *mm;

	mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
	user_event_mm_put(mm);
}

void user_event_mm_remove(struct task_struct *t)
{
	struct user_event_mm *mm;
	unsigned long flags;

	might_sleep();

	mm = t->user_event_mm;
	t->user_event_mm = NULL;

	/* Clone will increment the tasks, only remove if last clone */
	if (!refcount_dec_and_test(&mm->tasks))
		return;

	/* Remove the mm from the list, so it can no longer be enabled */
	spin_lock_irqsave(&user_event_mms_lock, flags);
	list_del_rcu(&mm->link);
	spin_unlock_irqrestore(&user_event_mms_lock, flags);

	/*
	 * We need to wait for currently occurring writes to stop within
	 * the mm. This is required since exit_mm() snaps the current rss
	 * stats and clears them. On the final mmdrop(), check_mm() will
	 * report a bug if these increment.
	 *
	 * All writes/pins are done under mmap_read lock, take the write
	 * lock to ensure in-progress faults have completed. Faults that
	 * are pending but yet to run will check the task count and skip
	 * the fault since the mm is going away.
	 */
	mmap_write_lock(mm->mm);
	mmap_write_unlock(mm->mm);

	/*
	 * Put for mm must be done after RCU delay to handle new refs in
	 * between the list_del_rcu() and now. This ensures any get refs
	 * during rcu_read_lock() are accounted for during list removal.
	 *
	 * CPU A			|	CPU B
	 * ---------------------------------------------------------------
	 * user_event_mm_remove()	|	rcu_read_lock();
	 *   list_del_rcu()		|	list_for_each_entry_rcu();
	 *   call_rcu()			|	refcount_inc();
	 *   .				|	rcu_read_unlock();
	 *   schedule_work()		|	.
	 *   user_event_mm_put()	|	.
	 *
	 * mmdrop() cannot be called in the softirq context of call_rcu()
	 * so we use a work queue after call_rcu() to run within.
	 */
	INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
	queue_rcu_work(system_wq, &mm->put_rwork);
}

void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
	struct user_event_mm *mm = user_event_mm_create(t);
	struct user_event_enabler *enabler;

	if (!mm)
		return;

	rcu_read_lock();

	list_for_each_entry_rcu(enabler, &old_mm->enablers, link)
		if (!user_event_enabler_dup(enabler, mm))
			goto error;

	rcu_read_unlock();

	return;
error:
	rcu_read_unlock();
	user_event_mm_remove(t);
}

static bool current_user_event_enabler_exists(unsigned long uaddr,
					      unsigned char bit)
{
	struct user_event_mm *user_mm = current_user_event_mm();
	bool exists;

	if (!user_mm)
		return false;

	exists = user_event_enabler_exists(user_mm, uaddr, bit);

	user_event_mm_put(user_mm);

	return exists;
}

static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
			   int *write_result)
{
	struct user_event_enabler *enabler;
	struct user_event_mm *user_mm;
	unsigned long uaddr = (unsigned long)reg->enable_addr;
	int attempt = 0;

	user_mm = current_user_event_mm();

	if (!user_mm)
		return NULL;

	enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

	if (!enabler)
		goto out;

	enabler->event = user;
	enabler->addr = uaddr;
	enabler->values = reg->enable_bit;
retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Attempt to reflect the current state within the process */
	mmap_read_lock(user_mm->mm);
	*write_result = user_event_enabler_write(user_mm, enabler, false,
						 &attempt);
	mmap_read_unlock(user_mm->mm);

	/*
	 * If the write works, then we will track the enabler. A ref to the
	 * underlying user_event is held by the enabler to prevent it going
	 * away while the enabler is still in use by a process. The ref is
	 * removed when the enabler is destroyed. This means an event cannot
	 * be forcefully deleted from the system until all tasks using it
	 * exit or run exec(), which includes forks and clones.
	 */
	if (!*write_result) {
		refcount_inc(&enabler->event->refcnt);
		list_add_rcu(&enabler->link, &user_mm->enablers);
	}

	mutex_unlock(&event_mutex);

	if (*write_result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;

		kfree(enabler);
		enabler = NULL;
	}
out:
	user_event_mm_put(user_mm);

	return enabler;
}

static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
	return refcount_read(&user->refcnt) == 1;
}
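
/*
 * Copies user payload data with page faults disabled, so a probe can run
 * from the tracing fast path; callers pre-fault the iterator when needed
 * and treat a short copy as a fault.
 */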

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
	size_t ret;

	pagefault_disable();

	ret = copy_from_iter_nocache(addr, bytes, i);

	pagefault_enable();

	return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
	struct user_event *user = (struct user_event *)call->data;

	return &user->fields;
}

/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field after:
 * test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective, they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
				char *raw_command, struct user_event **newuser)
{
	char *name = raw_command;
	char *args = strpbrk(name, " ");
	char *flags;

	if (args)
		*args++ = '\0';

	flags = strpbrk(name, ":");

	if (flags)
		*flags++ = '\0';

	return user_event_parse(group, name, args, flags, newuser);
}

static int user_field_array_size(const char *type)
{
	const char *start = strchr(type, '[');
	char val[8];
	char *bracket;
	int size = 0;

	if (start == NULL)
		return -EINVAL;

	if (strscpy(val, start + 1, sizeof(val)) <= 0)
		return -EINVAL;

	bracket = strchr(val, ']');

	if (!bracket)
		return -EINVAL;

	*bracket = '\0';

	if (kstrtouint(val, 0, &size))
		return -EINVAL;

	if (size > MAX_FIELD_ARRAY_SIZE)
		return -EINVAL;

	return size;
}

static int user_field_size(const char *type)
{
	/* long is not allowed from a user, since it's ambiguous in size */
	if (strcmp(type, "s64") == 0)
		return sizeof(s64);
	if (strcmp(type, "u64") == 0)
		return sizeof(u64);
	if (strcmp(type, "s32") == 0)
		return sizeof(s32);
	if (strcmp(type, "u32") == 0)
		return sizeof(u32);
	if (strcmp(type, "int") == 0)
		return sizeof(int);
	if (strcmp(type, "unsigned int") == 0)
		return sizeof(unsigned int);
	if (strcmp(type, "s16") == 0)
		return sizeof(s16);
	if (strcmp(type, "u16") == 0)
		return sizeof(u16);
	if (strcmp(type, "short") == 0)
		return sizeof(short);
	if (strcmp(type, "unsigned short") == 0)
		return sizeof(unsigned short);
	if (strcmp(type, "s8") == 0)
		return sizeof(s8);
	if (strcmp(type, "u8") == 0)
		return sizeof(u8);
	if (strcmp(type, "char") == 0)
		return sizeof(char);
	if (strcmp(type, "unsigned char") == 0)
		return sizeof(unsigned char);
	if (str_has_prefix(type, "char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "unsigned char["))
		return user_field_array_size(type);
	if (str_has_prefix(type, "__data_loc "))
		return sizeof(u32);
	if (str_has_prefix(type, "__rel_loc "))
		return sizeof(u32);

	/* Unknown basic type, error */
	return -EINVAL;
}

static void user_event_destroy_validators(struct user_event *user)
{
	struct user_event_validator *validator, *next;
	struct list_head *head = &user->validators;

	list_for_each_entry_safe(validator, next, head, link) {
		list_del(&validator->link);
		kfree(validator);
	}
}

static void user_event_destroy_fields(struct user_event *user)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;

	list_for_each_entry_safe(field, next, head, link) {
		list_del(&field->link);
		kfree(field);
	}
}

static int user_event_add_field(struct user_event *user, const char *type,
				const char *name, int offset, int size,
				int is_signed, int filter_type)
{
	struct user_event_validator *validator;
	struct ftrace_event_field *field;
	int validator_flags = 0;

	field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

	if (!field)
		return -ENOMEM;

	if (str_has_prefix(type, "__data_loc "))
		goto add_validator;

	if (str_has_prefix(type, "__rel_loc ")) {
		validator_flags |= VALIDATOR_REL;
		goto add_validator;
	}

	goto add_field;

add_validator:
	if (strstr(type, "char") != NULL)
		validator_flags |= VALIDATOR_ENSURE_NULL;

	validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

	if (!validator) {
		kfree(field);
		return -ENOMEM;
	}

	validator->flags = validator_flags;
	validator->offset = offset;

	/* Want sequential access when validating */
	list_add_tail(&validator->link, &user->validators);

add_field:
	field->type = type;
	field->name = name;
	field->offset = offset;
	field->size = size;
	field->is_signed = is_signed;
	field->filter_type = filter_type;

	if (filter_type == FILTER_OTHER)
		field->filter_type = filter_assign_type(type);

	list_add(&field->link, &user->fields);

	/*
	 * Min size from user writes that are required, this does not include
	 * the size of trace_entry (common fields).
	 */
	user->min_size = (offset + size) - sizeof(struct trace_entry);

	return 0;
}

/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
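
/*
 * For example, "struct my_struct mys 8" parses as type "struct my_struct",
 * name "mys" and an explicit size of 8, while "u32 id" parses as type "u32",
 * name "id" with the size (4) inferred via user_field_size().
 */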
static int user_event_parse_field(char *field, struct user_event *user,
				  u32 *offset)
{
	char *part, *type, *name;
	u32 depth = 0, saved_offset = *offset;
	int len, size = -EINVAL;
	bool is_struct = false;

	field = skip_spaces(field);

	if (*field == '\0')
		return 0;

	/* Handle types that have a space within */
	len = str_has_prefix(field, "unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "struct ");
	if (len) {
		is_struct = true;
		goto skip_next;
	}

	len = str_has_prefix(field, "__data_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__data_loc ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc unsigned ");
	if (len)
		goto skip_next;

	len = str_has_prefix(field, "__rel_loc ");
	if (len)
		goto skip_next;

	goto parse;
skip_next:
	type = field;
	field = strpbrk(field + len, " ");

	if (field == NULL)
		return -EINVAL;

	*field++ = '\0';
	depth++;
parse:
	name = NULL;

	while ((part = strsep(&field, " ")) != NULL) {
		switch (depth++) {
		case FIELD_DEPTH_TYPE:
			type = part;
			break;
		case FIELD_DEPTH_NAME:
			name = part;
			break;
		case FIELD_DEPTH_SIZE:
			if (!is_struct)
				return -EINVAL;

			if (kstrtou32(part, 10, &size))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}

	if (depth < FIELD_DEPTH_SIZE || !name)
		return -EINVAL;

	if (depth == FIELD_DEPTH_SIZE)
		size = user_field_size(type);

	if (size == 0)
		return -EINVAL;

	if (size < 0)
		return size;

	*offset = saved_offset + size;

	return user_event_add_field(user, type, name, saved_offset, size,
				    type[0] != 'u', FILTER_OTHER);
}

static int user_event_parse_fields(struct user_event *user, char *args)
{
	char *field;
	u32 offset = sizeof(struct trace_entry);
	int ret = -EINVAL;

	if (args == NULL)
		return 0;

	while ((field = strsep(&args, ";")) != NULL) {
		ret = user_event_parse_field(field, user, &offset);

		if (ret)
			break;
	}

	return ret;
}

static struct trace_event_fields user_event_fields_array[1];

static const char *user_field_format(const char *type)
{
	if (strcmp(type, "s64") == 0)
		return "%lld";
	if (strcmp(type, "u64") == 0)
		return "%llu";
	if (strcmp(type, "s32") == 0)
		return "%d";
	if (strcmp(type, "u32") == 0)
		return "%u";
	if (strcmp(type, "int") == 0)
		return "%d";
	if (strcmp(type, "unsigned int") == 0)
		return "%u";
	if (strcmp(type, "s16") == 0)
		return "%d";
	if (strcmp(type, "u16") == 0)
		return "%u";
	if (strcmp(type, "short") == 0)
		return "%d";
	if (strcmp(type, "unsigned short") == 0)
		return "%u";
	if (strcmp(type, "s8") == 0)
		return "%d";
	if (strcmp(type, "u8") == 0)
		return "%u";
	if (strcmp(type, "char") == 0)
		return "%d";
	if (strcmp(type, "unsigned char") == 0)
		return "%u";
	if (strstr(type, "char[") != NULL)
		return "%s";

	/* Unknown, likely a struct; allowed, treat as 64-bit */
	return "%llu";
}

static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
	if (str_has_prefix(type, "__data_loc ")) {
		*str_func = "__get_str";
		goto check;
	}

	if (str_has_prefix(type, "__rel_loc ")) {
		*str_func = "__get_rel_str";
		goto check;
	}

	return false;
check:
	return strstr(type, "char") != NULL;
}

#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
				     char *buf, int len, bool *colon)
{
	int pos = 0, i = *iout;

	*colon = false;

	for (; i < argc; ++i) {
		if (i != *iout)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

		if (strchr(argv[i], ';')) {
			++i;
			*colon = true;
			break;
		}
	}

	/* Actual set, advance i */
	*iout = i;

	return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
				 char *buf, int len, bool colon)
{
	int pos = 0;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
	pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
	pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

	if (colon)
		pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

	return pos + 1;
}

static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int pos = 0, depth = 0;
	const char *str_func;

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth != 0)
			pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

		pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
				field->name, user_field_format(field->type));

		depth++;
	}

	pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (user_field_is_dyn_string(field->type, &str_func))
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", %s(%s)", str_func, field->name);
		else
			pos += snprintf(buf + pos, LEN_OR_ZERO,
					", REC->%s", field->name);
	}

	return pos + 1;
}

static int user_event_create_print_fmt(struct user_event *user)
{
	char *print_fmt;
	int len;

	len = user_event_set_print_fmt(user, NULL, 0);

	print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

	if (!print_fmt)
		return -ENOMEM;

	user_event_set_print_fmt(user, print_fmt, len);

	user->call.print_fmt = print_fmt;

	return 0;
}

static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
						int flags,
						struct trace_event *event)
{
	return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
	.trace = user_event_print_trace,
};

static int user_event_set_call_visible(struct user_event *user, bool visible)
{
	int ret;
	const struct cred *old_cred;
	struct cred *cred;

	cred = prepare_creds();

	if (!cred)
		return -ENOMEM;

	/*
	 * While by default tracefs is locked down, systems can be configured
	 * to allow user_event files to be less locked down. The extreme case
	 * being "other" has read/write access to user_events_data/status.
	 *
	 * When not locked down, processes may not have permissions to
	 * add/remove calls themselves to tracefs. We need to temporarily
	 * switch to root file permission to allow for this scenario.
	 */
	cred->fsuid = GLOBAL_ROOT_UID;

	old_cred = override_creds(cred);

	if (visible)
		ret = trace_add_event_call(&user->call);
	else
		ret = trace_remove_event_call(&user->call);

	revert_creds(old_cred);
	put_cred(cred);

	return ret;
}

static int destroy_user_event(struct user_event *user)
{
	int ret = 0;

	lockdep_assert_held(&event_mutex);

	/* Must destroy fields before call removal */
	user_event_destroy_fields(user);

	ret = user_event_set_call_visible(user, false);

	if (ret)
		return ret;

	dyn_event_remove(&user->devent);
	hash_del(&user->node);

	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(EVENT_NAME(user));
	kfree(user);

	if (current_user_events > 0)
		current_user_events--;
	else
		pr_alert("BUG: Bad current_user_events\n");

	return ret;
}

static struct user_event *find_user_event(struct user_event_group *group,
					  char *name, u32 *outkey)
{
	struct user_event *user;
	u32 key = user_event_key(name);

	*outkey = key;

	hash_for_each_possible(group->register_table, user, node, key)
		if (!strcmp(EVENT_NAME(user), name)) {
			refcount_inc(&user->refcnt);
			return user;
		}

	return NULL;
}
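
/*
 * Validators check dynamic array payloads (__data_loc/__rel_loc): the u32
 * loc word stores the size in its upper 16 bits and the offset in its lower
 * 16 bits, with __rel_loc offsets relative to the end of the loc word.
 */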

static int user_event_validate(struct user_event *user, void *data, int len)
{
	struct list_head *head = &user->validators;
	struct user_event_validator *validator;
	void *pos, *end = data + len;
	u32 loc, offset, size;

	list_for_each_entry(validator, head, link) {
		pos = data + validator->offset;

		/* Already done min_size check, no bounds check here */
		loc = *(u32 *)pos;
		offset = loc & 0xffff;
		size = loc >> 16;

		if (likely(validator->flags & VALIDATOR_REL))
			pos += offset + sizeof(loc);
		else
			pos = data + offset;

		pos += size;

		if (unlikely(pos > end))
			return -EFAULT;

		if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
			if (unlikely(*(char *)(pos - 1) != '\0'))
				return -EFAULT;
	}

	return 0;
}

/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
			      void *tpdata, bool *faulted)
{
	struct trace_event_file *file;
	struct trace_entry *entry;
	struct trace_event_buffer event_buffer;
	size_t size = sizeof(*entry) + i->count;

	file = (struct trace_event_file *)tpdata;

	if (!file ||
	    !(file->flags & EVENT_FILE_FL_ENABLED) ||
	    trace_trigger_soft_disabled(file))
		return;

	/* Allocates and fills trace_entry, + 1 of this is data payload */
	entry = trace_event_buffer_reserve(&event_buffer, file, size);

	if (unlikely(!entry))
		return;

	if (unlikely(!copy_nofault(entry + 1, i->count, i)))
		goto discard;

	if (!list_empty(&user->validators) &&
	    unlikely(user_event_validate(user, entry, size)))
		goto discard;

	trace_event_buffer_commit(&event_buffer);

	return;
discard:
	*faulted = true;
	__trace_event_discard_commit(event_buffer.buffer,
				     event_buffer.event);
}

#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
			    void *tpdata, bool *faulted)
{
	struct hlist_head *perf_head;

	perf_head = this_cpu_ptr(user->call.perf_events);

	if (perf_head && !hlist_empty(perf_head)) {
		struct trace_entry *perf_entry;
		struct pt_regs *regs;
		size_t size = sizeof(*perf_entry) + i->count;
		int context;

		perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
						  &regs, &context);

		if (unlikely(!perf_entry))
			return;

		perf_fetch_caller_regs(regs);

		if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
			goto discard;

		if (!list_empty(&user->validators) &&
		    unlikely(user_event_validate(user, perf_entry, size)))
			goto discard;

		perf_trace_buf_submit(perf_entry, size, context,
				      user->call.event.type, 1, regs,
				      perf_head, NULL);

		return;
discard:
		*faulted = true;
		perf_swevent_put_recursion_context(context);
	}
}
#endif

/*
 * Update the enabled bit among all user processes.
 */
static void update_enable_bit_for(struct user_event *user)
{
	struct tracepoint *tp = &user->tracepoint;
	char status = 0;

	if (atomic_read(&tp->key.enabled) > 0) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				probe_func = probe_func_ptr->func;

				if (probe_func == user_event_ftrace)
					status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
				else if (probe_func == user_event_perf)
					status |= EVENT_STATUS_PERF;
#endif
				else
					status |= EVENT_STATUS_OTHER;
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();
	}

	user->status = status;

	user_event_enabler_update(user);
}

/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
			  enum trace_reg type,
			  void *data)
{
	struct user_event *user = (struct user_event *)call->data;
	int ret = 0;

	if (!user)
		return -ENOENT;

	switch (type) {
	case TRACE_REG_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->probe,
					    data);
		goto dec;

#ifdef CONFIG_PERF_EVENTS
	case TRACE_REG_PERF_REGISTER:
		ret = tracepoint_probe_register(call->tp,
						call->class->perf_probe,
						data);
		if (!ret)
			goto inc;
		break;

	case TRACE_REG_PERF_UNREGISTER:
		tracepoint_probe_unregister(call->tp,
					    call->class->perf_probe,
					    data);
		goto dec;

	case TRACE_REG_PERF_OPEN:
	case TRACE_REG_PERF_CLOSE:
	case TRACE_REG_PERF_ADD:
	case TRACE_REG_PERF_DEL:
		break;
#endif
	}

	return ret;
inc:
	refcount_inc(&user->refcnt);
	update_enable_bit_for(user);
	return 0;
dec:
	update_enable_bit_for(user);
	refcount_dec(&user->refcnt);
	return 0;
}

static int user_event_create(const char *raw_command)
{
	struct user_event_group *group;
	struct user_event *user;
	char *name;
	int ret;

	if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
		return -ECANCELED;

	raw_command += USER_EVENTS_PREFIX_LEN;
	raw_command = skip_spaces(raw_command);

	name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);

	if (!name)
		return -ENOMEM;

	group = current_user_event_group();

	if (!group) {
		kfree(name);
		return -ENOENT;
	}

	mutex_lock(&group->reg_mutex);

	ret = user_event_parse_cmd(group, name, &user);

	if (!ret)
		refcount_dec(&user->refcnt);

	mutex_unlock(&group->reg_mutex);

	if (ret)
		kfree(name);

	return ret;
}

static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	struct ftrace_event_field *field, *next;
	struct list_head *head;
	int depth = 0;

	seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

	head = trace_get_fields(&user->call);

	list_for_each_entry_safe_reverse(field, next, head, link) {
		if (depth == 0)
			seq_puts(m, " ");
		else
			seq_puts(m, "; ");

		seq_printf(m, "%s %s", field->type, field->name);

		if (str_has_prefix(field->type, "struct "))
			seq_printf(m, " %d", field->size);

		depth++;
	}

	seq_puts(m, "\n");

	return 0;
}

static bool user_event_is_busy(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}

static bool user_field_match(struct ftrace_event_field *field, int argc,
			     const char **argv, int *iout)
{
	char *field_name = NULL, *dyn_field_name = NULL;
	bool colon = false, match = false;
	int dyn_len, len;

	if (*iout >= argc)
		return false;

	dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
					    0, &colon);

	len = user_field_set_string(field, field_name, 0, colon);

	if (dyn_len != len)
		return false;

	dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
	field_name = kmalloc(len, GFP_KERNEL);

	if (!dyn_field_name || !field_name)
		goto out;

	user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
				  dyn_len, &colon);

	user_field_set_string(field, field_name, len, colon);

	match = strcmp(dyn_field_name, field_name) == 0;
out:
	kfree(dyn_field_name);
	kfree(field_name);

	return match;
}

static bool user_fields_match(struct user_event *user, int argc,
			      const char **argv)
{
	struct ftrace_event_field *field, *next;
	struct list_head *head = &user->fields;
	int i = 0;

	list_for_each_entry_safe_reverse(field, next, head, link)
		if (!user_field_match(field, argc, argv, &i))
			return false;

	if (i != argc)
		return false;

	return true;
}

static bool user_event_match(const char *system, const char *event,
			     int argc, const char **argv, struct dyn_event *ev)
{
	struct user_event *user = container_of(ev, struct user_event, devent);
	bool match;

	match = strcmp(EVENT_NAME(user), event) == 0 &&
		(!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

	if (match && argc > 0)
		match = user_fields_match(user, argc, argv);

	return match;
}

static struct dyn_event_operations user_event_dops = {
	.create = user_event_create,
	.show = user_event_show,
	.is_busy = user_event_is_busy,
	.free = user_event_free,
	.match = user_event_match,
};

static int user_event_trace_register(struct user_event *user)
{
	int ret;

	ret = register_trace_event(&user->call.event);

	if (!ret)
		return -ENODEV;

	ret = user_event_set_call_visible(user, true);

	if (ret)
		unregister_trace_event(&user->call.event);

	return ret;
}

/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
			    char *args, char *flags,
			    struct user_event **newuser)
{
	int ret;
	u32 key;
	struct user_event *user;

	/* Prevent dyn_event from racing */
	mutex_lock(&event_mutex);
	user = find_user_event(group, name, &key);
	mutex_unlock(&event_mutex);

	if (user) {
		*newuser = user;
		/*
		 * Name is allocated by caller, free it since it already exists.
		 * Caller only worries about failure cases for freeing.
		 */
		kfree(name);
		return 0;
	}

	user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);

	if (!user)
		return -ENOMEM;

	INIT_LIST_HEAD(&user->class.fields);
	INIT_LIST_HEAD(&user->fields);
	INIT_LIST_HEAD(&user->validators);

	user->group = group;
	user->tracepoint.name = name;

	ret = user_event_parse_fields(user, args);

	if (ret)
		goto put_user;

	ret = user_event_create_print_fmt(user);

	if (ret)
		goto put_user;

	user->call.data = user;
	user->call.class = &user->class;
	user->call.name = name;
	user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
	user->call.tp = &user->tracepoint;
	user->call.event.funcs = &user_event_funcs;
	user->class.system = group->system_name;

	user->class.fields_array = user_event_fields_array;
	user->class.get_fields = user_event_get_fields;
	user->class.reg = user_event_reg;
	user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
	user->class.perf_probe = user_event_perf;
#endif

	mutex_lock(&event_mutex);

	if (current_user_events >= max_user_events) {
		ret = -EMFILE;
		goto put_user_lock;
	}

	ret = user_event_trace_register(user);

	if (ret)
		goto put_user_lock;

	/* Ensure we track self ref and caller ref (2) */
	refcount_set(&user->refcnt, 2);

	dyn_event_init(&user->devent, &user_event_dops);
	dyn_event_add(&user->devent, &user->call);
	hash_add(group->register_table, &user->node, key);
	current_user_events++;

	mutex_unlock(&event_mutex);

	*newuser = user;
	return 0;
put_user_lock:
	mutex_unlock(&event_mutex);
put_user:
	user_event_destroy_fields(user);
	user_event_destroy_validators(user);
	kfree(user->call.print_fmt);
	kfree(user);
	return ret;
}

/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(struct user_event_group *group, char *name)
{
	u32 key;
	struct user_event *user = find_user_event(group, name, &key);

	if (!user)
		return -ENOENT;

	refcount_dec(&user->refcnt);

	if (!user_event_last_ref(user))
		return -EBUSY;

	return destroy_user_event(user);
}
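
/*
 * Example (user space, abridged sketch; "data_fd" and "count" are
 * illustrative): a write is the registered write_index followed by the
 * event payload:
 *
 *	struct iovec io[2] = {
 *		{ &write_index, sizeof(write_index) },
 *		{ &count, sizeof(count) },
 *	};
 *
 *	writev(data_fd, io, 2);
 */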

/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_refs *refs;
	struct user_event *user = NULL;
	struct tracepoint *tp;
	ssize_t ret = i->count;
	int idx;

	if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
		return -EFAULT;

	if (idx < 0)
		return -EINVAL;

	rcu_read_lock_sched();

	refs = rcu_dereference_sched(info->refs);

	/*
	 * The refs->events array is protected by RCU, and new items may be
	 * added. But the user retrieved from indexing into the events array
	 * shall be immutable while the file is opened.
	 */
	if (likely(refs && idx < refs->count))
		user = refs->events[idx];

	rcu_read_unlock_sched();

	if (unlikely(user == NULL))
		return -ENOENT;

	if (unlikely(i->count < user->min_size))
		return -EINVAL;

	tp = &user->tracepoint;

	/*
	 * It's possible key.enabled disables after this check, however
	 * we don't mind if a few events are included in this condition.
	 */
	if (likely(atomic_read(&tp->key.enabled) > 0)) {
		struct tracepoint_func *probe_func_ptr;
		user_event_func_t probe_func;
		struct iov_iter copy;
		void *tpdata;
		bool faulted;

		if (unlikely(fault_in_iov_iter_readable(i, i->count)))
			return -EFAULT;

		faulted = false;

		rcu_read_lock_sched();

		probe_func_ptr = rcu_dereference_sched(tp->funcs);

		if (probe_func_ptr) {
			do {
				copy = *i;
				probe_func = probe_func_ptr->func;
				tpdata = probe_func_ptr->data;
				probe_func(user, &copy, tpdata, &faulted);
			} while ((++probe_func_ptr)->func);
		}

		rcu_read_unlock_sched();

		if (unlikely(faulted))
			return -EFAULT;
	} else
		return -EBADF;

	return ret;
}

static int user_events_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	struct user_event_file_info *info;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);

	if (!info)
		return -ENOMEM;

	info->group = group;

	file->private_data = info;

	return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
				 size_t count, loff_t *ppos)
{
	struct iovec iov;
	struct iov_iter i;

	if (unlikely(*ppos != 0))
		return -EFAULT;

	if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
					 count, &iov, &i)))
		return -EFAULT;

	return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
	return user_events_write_core(kp->ki_filp, i);
}

static int user_events_ref_add(struct user_event_file_info *info,
			       struct user_event *user)
{
	struct user_event_group *group = info->group;
	struct user_event_refs *refs, *new_refs;
	int i, size, count = 0;

	refs = rcu_dereference_protected(info->refs,
					 lockdep_is_held(&group->reg_mutex));

	if (refs) {
		count = refs->count;

		for (i = 0; i < count; ++i)
			if (refs->events[i] == user)
				return i;
	}

	size = struct_size(refs, events, count + 1);

	new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);

	if (!new_refs)
		return -ENOMEM;

	new_refs->count = count + 1;

	for (i = 0; i < count; ++i)
		new_refs->events[i] = refs->events[i];

	new_refs->events[i] = user;

	refcount_inc(&user->refcnt);

	rcu_assign_pointer(info->refs, new_refs);

	if (refs)
		kfree_rcu(refs, rcu);

	return i;
}

static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_reg, write_index))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	if (ret)
		return ret;

	/* Ensure no flags, since we don't support any yet */
	if (kreg->flags != 0)
		return -EINVAL;

	/* Ensure supported size */
	switch (kreg->enable_size) {
	case 4:
		/* 32-bit */
		break;
#if BITS_PER_LONG >= 64
	case 8:
		/* 64-bit */
		break;
#endif
	default:
		return -EINVAL;
	}

	/* Ensure natural alignment */
	if (kreg->enable_addr % kreg->enable_size)
		return -EINVAL;

	/* Ensure bit range for size */
	if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
		return -EINVAL;

	/* Ensure accessible */
	if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
		       kreg->enable_size))
		return -EFAULT;

	kreg->size = size;

	return 0;
}
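
/*
 * Example registration from user space (abridged sketch; "enabled" is an
 * illustrative 32-bit variable the kernel will set bit 31 of while the
 * event is enabled):
 *
 *	struct user_reg reg = {
 *		.size = sizeof(reg),
 *		.enable_bit = 31,
 *		.enable_size = sizeof(enabled),
 *		.enable_addr = (__u64)&enabled,
 *		.name_args = (__u64)"test u32 count",
 *	};
 *
 *	int write_index = ioctl(data_fd, DIAG_IOCSREG, &reg);
 */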

/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
				  unsigned long uarg)
{
	struct user_reg __user *ureg = (struct user_reg __user *)uarg;
	struct user_reg reg;
	struct user_event *user;
	struct user_event_enabler *enabler;
	char *name;
	long ret;
	int write_result;

	ret = user_reg_get(ureg, &reg);

	if (ret)
		return ret;

	/*
	 * Prevent users from using the same address and bit multiple times
	 * within the same mm address space. This can cause unexpected behavior
	 * for user processes that is far easier to debug if this is explicitly
	 * an error upon registering.
	 */
	if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
					      reg.enable_bit))
		return -EADDRINUSE;

	name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
			    MAX_EVENT_DESC);

	if (IS_ERR(name)) {
		ret = PTR_ERR(name);
		return ret;
	}

	ret = user_event_parse_cmd(info->group, name, &user);

	if (ret) {
		kfree(name);
		return ret;
	}

	ret = user_events_ref_add(info, user);

	/* No longer need parse ref, ref_add either worked or not */
	refcount_dec(&user->refcnt);

	/* Positive number is index and valid */
	if (ret < 0)
		return ret;

	/*
	 * user_events_ref_add succeeded:
	 * At this point we have a user_event, its lifetime is bound by the
	 * reference count, not this file. If anything fails, the user_event
	 * still has a reference until the file is released. During release
	 * any remaining references (from user_events_ref_add) are decremented.
	 *
	 * Attempt to create an enabler, which too has a lifetime tied in the
	 * same way for the event. Once the task that caused the enabler to be
	 * created exits or issues exec() then the enablers it has created
	 * will be destroyed and the ref to the event will be decremented.
	 */
	enabler = user_event_enabler_create(&reg, user, &write_result);

	if (!enabler)
		return -ENOMEM;

	/* Write failed/faulted, give error back to caller */
	if (write_result)
		return write_result;

	put_user((u32)ret, &ureg->write_index);

	return 0;
}

/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
				  unsigned long uarg)
{
	void __user *ubuf = (void __user *)uarg;
	char *name;
	long ret;

	name = strndup_user(ubuf, MAX_EVENT_DESC);

	if (IS_ERR(name))
		return PTR_ERR(name);

	/* event_mutex prevents dyn_event from racing */
	mutex_lock(&event_mutex);
	ret = delete_user_event(info->group, name);
	mutex_unlock(&event_mutex);

	kfree(name);

	return ret;
}

static long user_unreg_get(struct user_unreg __user *ureg,
			   struct user_unreg *kreg)
{
	u32 size;
	long ret;

	ret = get_user(size, &ureg->size);

	if (ret)
		return ret;

	if (size > PAGE_SIZE)
		return -E2BIG;

	if (size < offsetofend(struct user_unreg, disable_addr))
		return -EINVAL;

	ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

	/* Ensure no reserved values, since we don't support any yet */
	if (kreg->__reserved || kreg->__reserved2)
		return -EINVAL;

	return ret;
}

static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
				   unsigned long uaddr, unsigned char bit)
{
	struct user_event_enabler enabler;
	int result;
	int attempt = 0;

	memset(&enabler, 0, sizeof(enabler));
	enabler.addr = uaddr;
	enabler.values = bit;
retry:
	/* Prevents state changes from racing with new enablers */
	mutex_lock(&event_mutex);

	/* Force the bit to be cleared, since no event is attached */
	mmap_read_lock(user_mm->mm);
	result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
	mmap_read_unlock(user_mm->mm);

	mutex_unlock(&event_mutex);

	if (result) {
		/* Attempt to fault-in and retry if it worked */
		if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
			goto retry;
	}

	return result;
}
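
/*
 * Example unregister from user space (abridged sketch), undoing the
 * registration shown above:
 *
 *	struct user_unreg unreg = {
 *		.size = sizeof(unreg),
 *		.disable_bit = 31,
 *		.disable_addr = (__u64)&enabled,
 *	};
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */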

/*
 * Unregisters an enablement address/bit within a task/user mm.
 */
static long user_events_ioctl_unreg(unsigned long uarg)
{
	struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
	struct user_event_mm *mm = current->user_event_mm;
	struct user_event_enabler *enabler, *next;
	struct user_unreg reg;
	long ret;

	ret = user_unreg_get(ureg, &reg);

	if (ret)
		return ret;

	if (!mm)
		return -ENOENT;

	ret = -ENOENT;

	/*
	 * Flags freeing and faulting are used to indicate if the enabler is in
	 * use at all. When faulting is set a page-fault is occurring
	 * asynchronously. During async fault if freeing is set, the enabler
	 * will be destroyed. If no async fault is happening, we can destroy it
	 * now since we hold the event_mutex during these checks.
	 */
	mutex_lock(&event_mutex);

	list_for_each_entry_safe(enabler, next, &mm->enablers, link)
		if (enabler->addr == reg.disable_addr &&
		    ENABLE_BIT(enabler) == reg.disable_bit) {
			set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));

			if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
				user_event_enabler_destroy(enabler);

			/* Removed at least one */
			ret = 0;
		}

	mutex_unlock(&event_mutex);

	/* Ensure bit is now cleared for user, regardless of event status */
	if (!ret)
		ret = user_event_mm_clear_bit(mm, reg.disable_addr,
					      reg.disable_bit);

	return ret;
}
2299 static long user_events_ioctl(struct file *file, unsigned int cmd,
2302 struct user_event_file_info *info = file->private_data;
2303 struct user_event_group *group = info->group;
2308 mutex_lock(&group->reg_mutex);
2309 ret = user_events_ioctl_reg(info, uarg);
2310 mutex_unlock(&group->reg_mutex);
2314 mutex_lock(&group->reg_mutex);
2315 ret = user_events_ioctl_del(info, uarg);
2316 mutex_unlock(&group->reg_mutex);
2319 case DIAG_IOCSUNREG:
2320 mutex_lock(&group->reg_mutex);
2321 ret = user_events_ioctl_unreg(uarg);
2322 mutex_unlock(&group->reg_mutex);

/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
	struct user_event_file_info *info = file->private_data;
	struct user_event_group *group;
	struct user_event_refs *refs;
	struct user_event *user;
	int i;

	if (!info)
		return -EINVAL;

	group = info->group;

	/*
	 * Ensure refs cannot change under any situation by taking the
	 * register mutex during the final freeing of the references.
	 */
	mutex_lock(&group->reg_mutex);

	refs = info->refs;

	if (!refs)
		goto out;

	/*
	 * The lifetime of refs has reached an end, it's tied to this file.
	 * The underlying user_events are ref counted, and cannot be freed.
	 * After this decrement, the user_events may be freed elsewhere.
	 */
	for (i = 0; i < refs->count; ++i) {
		user = refs->events[i];

		if (user)
			refcount_dec(&user->refcnt);
	}
out:
	file->private_data = NULL;

	mutex_unlock(&group->reg_mutex);

	kfree(refs);
	kfree(info);

	return 0;
}

static const struct file_operations user_data_fops = {
	.open		= user_events_open,
	.write		= user_events_write,
	.write_iter	= user_events_write_iter,
	.unlocked_ioctl	= user_events_ioctl,
	.release	= user_events_release,
};

static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
	if (*pos)
		return NULL;

	return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	++*pos;

	return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}
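
/*
 * Renders the user_events_status file. Each registered event prints on its
 * own line; e.g. an event attached to ftrace would show roughly as:
 *
 *	test # Used by ftrace
 *
 * followed by the Active/Busy totals for the group.
 */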

static int user_seq_show(struct seq_file *m, void *p)
{
	struct user_event_group *group = m->private;
	struct user_event *user;
	char status;
	int i, active = 0, busy = 0;

	if (!group)
		return -EINVAL;

	mutex_lock(&group->reg_mutex);

	hash_for_each(group->register_table, i, user, node) {
		status = user->status;

		seq_printf(m, "%s", EVENT_NAME(user));

		if (status != 0)
			seq_puts(m, " #");

		if (status != 0) {
			seq_puts(m, " Used by");
			if (status & EVENT_STATUS_FTRACE)
				seq_puts(m, " ftrace");
			if (status & EVENT_STATUS_PERF)
				seq_puts(m, " perf");
			if (status & EVENT_STATUS_OTHER)
				seq_puts(m, " other");
			busy++;
		}

		seq_puts(m, "\n");
		active++;
	}

	mutex_unlock(&group->reg_mutex);

	seq_puts(m, "\n");
	seq_printf(m, "Active: %d\n", active);
	seq_printf(m, "Busy: %d\n", busy);

	return 0;
}

static const struct seq_operations user_seq_ops = {
	.start	= user_seq_start,
	.next	= user_seq_next,
	.stop	= user_seq_stop,
	.show	= user_seq_show,
};

static int user_status_open(struct inode *node, struct file *file)
{
	struct user_event_group *group;
	int ret;

	group = current_user_event_group();

	if (!group)
		return -ENOENT;

	ret = seq_open(file, &user_seq_ops);

	if (!ret) {
		/* Chain group to seq_file */
		struct seq_file *m = file->private_data;

		m->private = group;
	}

	return ret;
}

static const struct file_operations user_status_fops = {
	.open		= user_status_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
	struct dentry *edata, *emmap;

	edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
				    NULL, NULL, &user_data_fops);

	if (!edata) {
		pr_warn("Could not create tracefs 'user_events_data' entry\n");
		goto err;
	}

	emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
				    NULL, NULL, &user_status_fops);

	if (!emmap) {
		tracefs_remove(edata);
		pr_warn("Could not create tracefs 'user_events_status' entry\n");
		goto err;
	}

	return 0;
err:
	return -ENODEV;
}

static int set_max_user_events_sysctl(struct ctl_table *table, int write,
				      void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;

	mutex_lock(&event_mutex);

	ret = proc_douintvec(table, write, buffer, lenp, ppos);

	mutex_unlock(&event_mutex);

	return ret;
}

static struct ctl_table user_event_sysctls[] = {
	{
		.procname	= "user_events_max",
		.data		= &max_user_events,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= set_max_user_events_sysctl,
	},
	{}
};
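
/*
 * Exposed as /proc/sys/kernel/user_events_max; writes are serialized with
 * event_mutex by the handler above, so the limit cannot change in the middle
 * of a check against current_user_events.
 */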

static int __init trace_events_user_init(void)
{
	int ret;

	fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);

	if (!fault_cache)
		return -ENOMEM;

	init_group = user_event_group_create(&init_user_ns);

	if (!init_group) {
		kmem_cache_destroy(fault_cache);
		return -ENOMEM;
	}

	ret = create_user_tracefs();

	if (ret) {
		pr_warn("user_events could not register with tracefs\n");
		user_event_group_destroy(init_group);
		kmem_cache_destroy(fault_cache);
		init_group = NULL;
		return ret;
	}

	if (dyn_event_register(&user_event_dops))
		pr_warn("user_events could not register with dyn_events\n");

	register_sysctl_init("kernel", user_event_sysctls);

	return 0;
}

fs_initcall(trace_events_user_init);