// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, Microsoft Corporation.
 *
 * Authors:
 *   Beau Belgrave <beaub@linux.microsoft.com>
 */
#include <linux/bitmap.h>
#include <linux/cdev.h>
#include <linux/hashtable.h>
#include <linux/list.h>
#include <linux/uio.h>
#include <linux/ioctl.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/trace_events.h>
#include <linux/tracefs.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/user_events.h>
#include "trace_dynevent.h"
#include "trace_output.h"
#define USER_EVENTS_PREFIX "u:"
#define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)

#define FIELD_DEPTH_TYPE 0
#define FIELD_DEPTH_NAME 1
#define FIELD_DEPTH_SIZE 2
/* Limit how long an event name plus args can be within the subsystem. */
#define MAX_EVENT_DESC 512
#define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
#define MAX_FIELD_ARRAY_SIZE 1024
/*
 * Internal bits (kernel side only) to keep track of connected probes:
 * These are used when status is requested in text form about an event. These
 * bits are compared against an internal byte on the event to determine which
 * probes to print out to the user.
 *
 * These do not reflect the mapped bytes between the user and kernel space.
 */
#define EVENT_STATUS_FTRACE BIT(0)
#define EVENT_STATUS_PERF BIT(1)
#define EVENT_STATUS_OTHER BIT(7)
/*
 * Stores the system name, tables, and locks for a group of events. This
 * allows isolation for events by various means.
 */
struct user_event_group {
        char *system_name;
        struct hlist_node node;
        struct mutex reg_mutex;
        DECLARE_HASHTABLE(register_table, 8);
};
/* Group for init_user_ns mapping, top-most group */
static struct user_event_group *init_group;

/* Max allowed events for the whole system */
static unsigned int max_user_events = 32768;

/* Current number of events on the whole system */
static unsigned int current_user_events;
/*
 * Stores per-event properties. As users register events
 * within a file, a user_event might be created if it does not
 * already exist. These are globally used and their lifetime
 * is tied to the refcnt member. These cannot go away until the
 * refcnt reaches one.
 */
struct user_event {
        struct user_event_group *group;
        struct tracepoint tracepoint;
        struct trace_event_call call;
        struct trace_event_class class;
        struct dyn_event devent;
        struct hlist_node node;
        struct list_head fields;
        struct list_head validators;
        refcount_t refcnt;
        int min_size;
        char status;
};
/*
 * Stores per-mm/event properties that enable an address to be
 * updated properly for each task. As tasks are forked, we use
 * these to track enablement sites that are tied to an event.
 */
struct user_event_enabler {
        struct list_head mm_enablers_link;
        struct user_event *event;
        unsigned long addr;

        /* Track enable bit, flags, etc. Aligned for bitops. */
        unsigned long values;
};
/* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
#define ENABLE_VAL_BIT_MASK 0x3F

/* Bit 6 is for faulting status of enablement */
#define ENABLE_VAL_FAULTING_BIT 6

/* Bit 7 is for freeing status of enablement */
#define ENABLE_VAL_FREEING_BIT 7

/* Only duplicate the bit value */
#define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK

#define ENABLE_BITOPS(e) (&(e)->values)

#define ENABLE_BIT(e) ((int)((e)->values & ENABLE_VAL_BIT_MASK))
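
/*
 * Worked example (editorial sketch, not part of the original source): how
 * the 'values' word packs state. With enable_bit 3 and no flags set:
 *
 *      struct user_event_enabler en = { .values = 3 };
 *
 *      ENABLE_BIT(&en);                        // == 3, from bits 0-5
 *      test_bit(ENABLE_VAL_FAULTING_BIT,
 *               ENABLE_BITOPS(&en));           // false, bit 6 clear
 *      test_bit(ENABLE_VAL_FREEING_BIT,
 *               ENABLE_BITOPS(&en));           // false, bit 7 clear
 */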
/* Used for asynchronous faulting in of pages */
struct user_event_enabler_fault {
        struct work_struct work;
        struct user_event_mm *mm;
        struct user_event_enabler *enabler;
        int attempt;
};

static struct kmem_cache *fault_cache;

/* Global list of memory descriptors using user_events */
static LIST_HEAD(user_event_mms);
static DEFINE_SPINLOCK(user_event_mms_lock);
/*
 * Stores per-file events references. As users register events
 * within a file, this structure is modified and freed via RCU.
 * The lifetime of this struct is tied to the lifetime of the file.
 * These are not shared and only accessible by the file that created it.
 */
struct user_event_refs {
        struct rcu_head rcu;
        int count;
        struct user_event *events[];
};

struct user_event_file_info {
        struct user_event_group *group;
        struct user_event_refs *refs;
};
#define VALIDATOR_ENSURE_NULL (1 << 0)
#define VALIDATOR_REL (1 << 1)

struct user_event_validator {
        struct list_head user_event_link;
        int offset;
        int flags;
};

typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
                                   void *tpdata, bool *faulted);
static int user_event_parse(struct user_event_group *group, char *name,
                            char *args, char *flags,
                            struct user_event **newuser);

static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
static void user_event_mm_put(struct user_event_mm *mm);

static u32 user_event_key(char *name)
{
        return jhash(name, strlen(name), 0);
}
static void user_event_group_destroy(struct user_event_group *group)
{
        kfree(group->system_name);
        kfree(group);
}

static char *user_event_group_system_name(struct user_namespace *user_ns)
{
        char *system_name;
        int len = sizeof(USER_EVENTS_SYSTEM) + 1;

        if (user_ns != &init_user_ns) {
                /*
                 * Unexpected at this point:
                 * We only currently support init_user_ns.
                 * When we enable more, this will trigger a failure so log.
                 */
                pr_warn("user_events: Namespace other than init_user_ns!\n");
                return NULL;
        }

        system_name = kmalloc(len, GFP_KERNEL);

        if (!system_name)
                return NULL;

        snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);

        return system_name;
}
static inline struct user_event_group
*user_event_group_from_user_ns(struct user_namespace *user_ns)
{
        if (user_ns == &init_user_ns)
                return init_group;

        return NULL;
}

static struct user_event_group *current_user_event_group(void)
{
        struct user_namespace *user_ns = current_user_ns();
        struct user_event_group *group = NULL;

        while (user_ns) {
                group = user_event_group_from_user_ns(user_ns);

                if (group)
                        break;

                user_ns = user_ns->parent;
        }

        return group;
}
static struct user_event_group
*user_event_group_create(struct user_namespace *user_ns)
{
        struct user_event_group *group;

        group = kzalloc(sizeof(*group), GFP_KERNEL);

        if (!group)
                return NULL;

        group->system_name = user_event_group_system_name(user_ns);

        if (!group->system_name)
                goto error;

        mutex_init(&group->reg_mutex);
        hash_init(group->register_table);

        return group;
error:
        user_event_group_destroy(group);

        return NULL;
}
static void user_event_enabler_destroy(struct user_event_enabler *enabler)
{
        list_del_rcu(&enabler->mm_enablers_link);

        /* No longer tracking the event via the enabler */
        refcount_dec(&enabler->event->refcnt);

        kfree(enabler);
}
static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr,
                                  int attempt)
{
        bool unlocked;
        int ret;

        /*
         * Normally this is low, ensure that it cannot be taken advantage of by
         * bad user processes to cause excessive looping.
         */
        if (attempt > 10)
                return -EFAULT;

        mmap_read_lock(mm->mm);

        /* Ensure MM has tasks, cannot use after exit_mm() */
        if (refcount_read(&mm->tasks) == 0) {
                ret = -ENOENT;
                goto out;
        }

        ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
                               &unlocked);
out:
        mmap_read_unlock(mm->mm);

        return ret;
}
static int user_event_enabler_write(struct user_event_mm *mm,
                                    struct user_event_enabler *enabler,
                                    bool fixup_fault, int *attempt);
static void user_event_enabler_fault_fixup(struct work_struct *work)
{
        struct user_event_enabler_fault *fault = container_of(
                work, struct user_event_enabler_fault, work);
        struct user_event_enabler *enabler = fault->enabler;
        struct user_event_mm *mm = fault->mm;
        unsigned long uaddr = enabler->addr;
        int attempt = fault->attempt;
        int ret;

        ret = user_event_mm_fault_in(mm, uaddr, attempt);

        if (ret && ret != -ENOENT) {
                struct user_event *user = enabler->event;

                pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
                        mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
        }

        /* Prevent state changes from racing */
        mutex_lock(&event_mutex);

        /* User asked for enabler to be removed during fault */
        if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
                user_event_enabler_destroy(enabler);
                goto out;
        }

        /*
         * If we managed to get the page, re-issue the write. We do not
         * want to get into a possible infinite loop, which is why we only
         * attempt again directly if the page came in. If we couldn't get
         * the page here, then we will try again the next time the event is
         * enabled/disabled.
         */
        clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

        if (!ret) {
                mmap_read_lock(mm->mm);
                user_event_enabler_write(mm, enabler, true, &attempt);
                mmap_read_unlock(mm->mm);
        }
out:
        mutex_unlock(&event_mutex);

        /* In all cases we no longer need the mm or fault */
        user_event_mm_put(mm);
        kmem_cache_free(fault_cache, fault);
}
static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
                                           struct user_event_enabler *enabler,
                                           int attempt)
{
        struct user_event_enabler_fault *fault;

        fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);

        if (!fault)
                return false;

        INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
        fault->mm = user_event_mm_get(mm);
        fault->enabler = enabler;
        fault->attempt = attempt;

        /* Don't try to queue in again while we have a pending fault */
        set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

        if (!schedule_work(&fault->work)) {
                /* Allow another attempt later */
                clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));

                user_event_mm_put(mm);
                kmem_cache_free(fault_cache, fault);

                return false;
        }

        return true;
}
static int user_event_enabler_write(struct user_event_mm *mm,
                                    struct user_event_enabler *enabler,
                                    bool fixup_fault, int *attempt)
{
        unsigned long uaddr = enabler->addr;
        unsigned long *ptr;
        struct page *page;
        void *kaddr;
        int ret;

        lockdep_assert_held(&event_mutex);
        mmap_assert_locked(mm->mm);

        *attempt += 1;

        /* Ensure MM has tasks, cannot use after exit_mm() */
        if (refcount_read(&mm->tasks) == 0)
                return -ENOENT;

        if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
                     test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
                return -EBUSY;

        ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
                                    &page, NULL);

        if (unlikely(ret <= 0)) {
                if (!fixup_fault)
                        return -EFAULT;

                if (!user_event_enabler_queue_fault(mm, enabler, *attempt))
                        pr_warn("user_events: Unable to queue fault handler\n");

                return -EFAULT;
        }

        kaddr = kmap_local_page(page);
        ptr = kaddr + (uaddr & ~PAGE_MASK);

        /* Update bit atomically, user tracers must be atomic as well */
        if (enabler->event && enabler->event->status)
                set_bit(ENABLE_BIT(enabler), ptr);
        else
                clear_bit(ENABLE_BIT(enabler), ptr);

        kunmap_local(kaddr);
        unpin_user_pages_dirty_lock(&page, 1, true);

        return 0;
}
static bool user_event_enabler_exists(struct user_event_mm *mm,
                                      unsigned long uaddr, unsigned char bit)
{
        struct user_event_enabler *enabler;

        list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
                if (enabler->addr == uaddr && ENABLE_BIT(enabler) == bit)
                        return true;
        }

        return false;
}
static void user_event_enabler_update(struct user_event *user)
{
        struct user_event_enabler *enabler;
        struct user_event_mm *next;
        struct user_event_mm *mm;
        int attempt;

        lockdep_assert_held(&event_mutex);

        /*
         * We need to build a one-shot list of all the mms that have an
         * enabler for the user_event passed in. This list is only valid
         * while holding the event_mutex. The only reason for this is due
         * to the global mm list being RCU protected and we use methods
         * which can wait (mmap_read_lock and pin_user_pages_remote).
         *
         * NOTE: user_event_mm_get_all() increments the ref count of each
         * mm that is added to the list to prevent removal timing windows.
         * We must always put each mm after they are used, which may wait.
         */
        mm = user_event_mm_get_all(user);

        while (mm) {
                next = mm->next;

                mmap_read_lock(mm->mm);

                list_for_each_entry(enabler, &mm->enablers, mm_enablers_link) {
                        if (enabler->event == user) {
                                attempt = 0;
                                user_event_enabler_write(mm, enabler, true, &attempt);
                        }
                }

                mmap_read_unlock(mm->mm);
                user_event_mm_put(mm);
                mm = next;
        }
}
static bool user_event_enabler_dup(struct user_event_enabler *orig,
                                   struct user_event_mm *mm)
{
        struct user_event_enabler *enabler;

        /* Skip pending frees */
        if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
                return true;

        enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);

        if (!enabler)
                return false;

        enabler->event = orig->event;
        enabler->addr = orig->addr;

        /* Only dup part of value (ignore future flags, etc) */
        enabler->values = orig->values & ENABLE_VAL_DUP_MASK;

        refcount_inc(&enabler->event->refcnt);

        /* Enablers not exposed yet, RCU not required */
        list_add(&enabler->mm_enablers_link, &mm->enablers);

        return true;
}
static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
{
        refcount_inc(&mm->refcnt);

        return mm;
}
static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
{
        struct user_event_mm *found = NULL;
        struct user_event_enabler *enabler;
        struct user_event_mm *mm;

        /*
         * We use the mm->next field to build a one-shot list from the global
         * RCU protected list. To build this list the event_mutex must be held.
         * This lets us build a list without requiring allocs that could fail
         * when user based events are most wanted for diagnostics.
         */
        lockdep_assert_held(&event_mutex);

        /*
         * We do not want to block fork/exec while enablements are being
         * updated, so we use RCU to walk the current tasks that have used
         * user_events ABI for 1 or more events. Each enabler found in each
         * task that matches the event being updated has a write to reflect
         * the kernel state back into the process. Waits/faults must not occur
         * during this. So we scan the list under RCU for all the mm that have
         * the event within it. This is needed because mmap_read_lock() can wait.
         * Each user mm returned has a ref inc to handle remove RCU races.
         */
        rcu_read_lock();

        list_for_each_entry_rcu(mm, &user_event_mms, mms_link) {
                list_for_each_entry_rcu(enabler, &mm->enablers, mm_enablers_link) {
                        if (enabler->event == user) {
                                mm->next = found;
                                found = user_event_mm_get(mm);
                                break;
                        }
                }
        }

        rcu_read_unlock();

        return found;
}
static struct user_event_mm *user_event_mm_alloc(struct task_struct *t)
{
        struct user_event_mm *user_mm;

        user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);

        if (!user_mm)
                return NULL;

        user_mm->mm = t->mm;
        INIT_LIST_HEAD(&user_mm->enablers);
        refcount_set(&user_mm->refcnt, 1);
        refcount_set(&user_mm->tasks, 1);

        /*
         * The lifetime of the memory descriptor can slightly outlast
         * the task lifetime if a ref to the user_event_mm is taken
         * between list_del_rcu() and call_rcu(). Therefore we need
         * to take a reference to it to ensure it can live this long
         * under this corner case. This can also occur in clones that
         * outlast the parent.
         */
        mmgrab(user_mm->mm);

        return user_mm;
}
static void user_event_mm_attach(struct user_event_mm *user_mm, struct task_struct *t)
{
        unsigned long flags;

        spin_lock_irqsave(&user_event_mms_lock, flags);
        list_add_rcu(&user_mm->mms_link, &user_event_mms);
        spin_unlock_irqrestore(&user_event_mms_lock, flags);

        t->user_event_mm = user_mm;
}
static struct user_event_mm *current_user_event_mm(void)
{
        struct user_event_mm *user_mm = current->user_event_mm;

        if (user_mm)
                goto inc;

        user_mm = user_event_mm_alloc(current);

        if (!user_mm)
                goto error;

        user_event_mm_attach(user_mm, current);
inc:
        refcount_inc(&user_mm->refcnt);
error:
        return user_mm;
}
static void user_event_mm_destroy(struct user_event_mm *mm)
{
        struct user_event_enabler *enabler, *next;

        list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link)
                user_event_enabler_destroy(enabler);

        mmdrop(mm->mm);
        kfree(mm);
}

static void user_event_mm_put(struct user_event_mm *mm)
{
        if (mm && refcount_dec_and_test(&mm->refcnt))
                user_event_mm_destroy(mm);
}
static void delayed_user_event_mm_put(struct work_struct *work)
{
        struct user_event_mm *mm;

        mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
        user_event_mm_put(mm);
}
void user_event_mm_remove(struct task_struct *t)
{
        struct user_event_mm *mm;
        unsigned long flags;

        might_sleep();

        mm = t->user_event_mm;
        t->user_event_mm = NULL;

        /* Clone will increment the tasks, only remove if last clone */
        if (!refcount_dec_and_test(&mm->tasks))
                return;

        /* Remove the mm from the list, so it can no longer be enabled */
        spin_lock_irqsave(&user_event_mms_lock, flags);
        list_del_rcu(&mm->mms_link);
        spin_unlock_irqrestore(&user_event_mms_lock, flags);

        /*
         * We need to wait for currently occurring writes to stop within
         * the mm. This is required since exit_mm() snaps the current rss
         * stats and clears them. On the final mmdrop(), check_mm() will
         * report a bug if these increment.
         *
         * All writes/pins are done under mmap_read lock, take the write
         * lock to ensure in-progress faults have completed. Faults that
         * are pending but yet to run will check the task count and skip
         * the fault since the mm is going away.
         */
        mmap_write_lock(mm->mm);
        mmap_write_unlock(mm->mm);

        /*
         * Put for mm must be done after RCU delay to handle new refs in
         * between the list_del_rcu() and now. This ensures any get refs
         * during rcu_read_lock() are accounted for during list removal.
         *
         * CPU A                      |  CPU B
         * ---------------------------------------------------------------
         * user_event_mm_remove()     |  rcu_read_lock();
         *  list_del_rcu()            |  list_for_each_entry_rcu();
         *  call_rcu()                |  refcount_inc();
         *  .                         |  rcu_read_unlock();
         *  schedule_work()           |  .
         *  user_event_mm_put()       |  .
         *
         * mmdrop() cannot be called in the softirq context of call_rcu()
         * so we use a work queue after call_rcu() to run within.
         */
        INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
        queue_rcu_work(system_wq, &mm->put_rwork);
}
void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
{
        struct user_event_mm *mm = user_event_mm_alloc(t);
        struct user_event_enabler *enabler;

        if (!mm)
                return;

        rcu_read_lock();

        list_for_each_entry_rcu(enabler, &old_mm->enablers, mm_enablers_link) {
                if (!user_event_enabler_dup(enabler, mm))
                        goto error;
        }

        rcu_read_unlock();

        user_event_mm_attach(mm, t);

        return;
error:
        rcu_read_unlock();
        user_event_mm_destroy(mm);
}
static bool current_user_event_enabler_exists(unsigned long uaddr,
                                              unsigned char bit)
{
        struct user_event_mm *user_mm = current_user_event_mm();
        bool exists;

        if (!user_mm)
                return false;

        exists = user_event_enabler_exists(user_mm, uaddr, bit);

        user_event_mm_put(user_mm);

        return exists;
}
static struct user_event_enabler
*user_event_enabler_create(struct user_reg *reg, struct user_event *user,
                           int *write_result)
{
        struct user_event_enabler *enabler;
        struct user_event_mm *user_mm;
        unsigned long uaddr = (unsigned long)reg->enable_addr;
        int attempt = 0;

        user_mm = current_user_event_mm();

        if (!user_mm)
                return NULL;

        enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);

        if (!enabler)
                goto out;

        enabler->event = user;
        enabler->addr = uaddr;
        enabler->values = reg->enable_bit;
retry:
        /* Prevents state changes from racing with new enablers */
        mutex_lock(&event_mutex);

        /* Attempt to reflect the current state within the process */
        mmap_read_lock(user_mm->mm);
        *write_result = user_event_enabler_write(user_mm, enabler, false,
                                                 &attempt);
        mmap_read_unlock(user_mm->mm);

        /*
         * If the write works, then we will track the enabler. A ref to the
         * underlying user_event is held by the enabler to prevent it going
         * away while the enabler is still in use by a process. The ref is
         * removed when the enabler is destroyed. This means an event cannot
         * be forcefully deleted from the system until all tasks using it
         * exit or run exec(), which includes forks and clones.
         */
        if (!*write_result) {
                refcount_inc(&enabler->event->refcnt);
                list_add_rcu(&enabler->mm_enablers_link, &user_mm->enablers);
        }

        mutex_unlock(&event_mutex);

        if (*write_result) {
                /* Attempt to fault-in and retry if it worked */
                if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
                        goto retry;

                kfree(enabler);
                enabler = NULL;
        }
out:
        user_event_mm_put(user_mm);

        return enabler;
}
static __always_inline __must_check
bool user_event_last_ref(struct user_event *user)
{
        return refcount_read(&user->refcnt) == 1;
}

static __always_inline __must_check
size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
{
        size_t ret;

        pagefault_disable();

        ret = copy_from_iter_nocache(addr, bytes, i);

        pagefault_enable();

        return ret;
}

static struct list_head *user_event_get_fields(struct trace_event_call *call)
{
        struct user_event *user = (struct user_event *)call->data;

        return &user->fields;
}
/*
 * Parses a register command for user_events
 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
 *
 * Example event named 'test' with a 20 char 'msg' field with an unsigned int
 * 'id' field:
 *      test char[20] msg;unsigned int id
 *
 * NOTE: Offsets are from the user data perspective; they are not from the
 * trace_entry/buffer perspective. We automatically add the common properties
 * sizes to the offset for the user.
 *
 * Upon success user_event has its ref count increased by 1.
 */
static int user_event_parse_cmd(struct user_event_group *group,
                                char *raw_command, struct user_event **newuser)
{
        char *name = raw_command;
        char *args = strpbrk(name, " ");
        char *flags;

        if (args)
                *args++ = '\0';

        flags = strpbrk(name, ":");

        if (flags)
                *flags++ = '\0';

        return user_event_parse(group, name, args, flags, newuser);
}
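
/*
 * Editorial sketch (assumption; the flag name is hypothetical): a raw
 * command of "test:FLAG1 char[20] msg" is split in place by the strpbrk()
 * calls above into name = "test", flags = "FLAG1" and args = "char[20] msg"
 * before being handed to user_event_parse().
 */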
static int user_field_array_size(const char *type)
{
        const char *start = strchr(type, '[');
        char val[8];
        char *bracket;
        int size = 0;

        if (start == NULL)
                return -EINVAL;

        if (strscpy(val, start + 1, sizeof(val)) <= 0)
                return -EINVAL;

        bracket = strchr(val, ']');

        if (!bracket)
                return -EINVAL;

        *bracket = '\0';

        if (kstrtouint(val, 0, &size))
                return -EINVAL;

        if (size > MAX_FIELD_ARRAY_SIZE)
                return -EINVAL;

        return size;
}
static int user_field_size(const char *type)
{
        /* long is not allowed from a user, since it's ambiguous in size */
        if (strcmp(type, "s64") == 0)
                return sizeof(s64);
        if (strcmp(type, "u64") == 0)
                return sizeof(u64);
        if (strcmp(type, "s32") == 0)
                return sizeof(s32);
        if (strcmp(type, "u32") == 0)
                return sizeof(u32);
        if (strcmp(type, "int") == 0)
                return sizeof(int);
        if (strcmp(type, "unsigned int") == 0)
                return sizeof(unsigned int);
        if (strcmp(type, "s16") == 0)
                return sizeof(s16);
        if (strcmp(type, "u16") == 0)
                return sizeof(u16);
        if (strcmp(type, "short") == 0)
                return sizeof(short);
        if (strcmp(type, "unsigned short") == 0)
                return sizeof(unsigned short);
        if (strcmp(type, "s8") == 0)
                return sizeof(s8);
        if (strcmp(type, "u8") == 0)
                return sizeof(u8);
        if (strcmp(type, "char") == 0)
                return sizeof(char);
        if (strcmp(type, "unsigned char") == 0)
                return sizeof(unsigned char);
        if (str_has_prefix(type, "char["))
                return user_field_array_size(type);
        if (str_has_prefix(type, "unsigned char["))
                return user_field_array_size(type);
        if (str_has_prefix(type, "__data_loc "))
                return sizeof(u32);
        if (str_has_prefix(type, "__rel_loc "))
                return sizeof(u32);

        /* Unknown basic type, error */
        return -EINVAL;
}
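
/*
 * Examples of what the size helpers above resolve (editorial sketch):
 *
 *      user_field_size("u32")          -> 4
 *      user_field_size("char[20]")     -> 20 (via user_field_array_size)
 *      user_field_size("char[2000]")   -> -EINVAL (> MAX_FIELD_ARRAY_SIZE)
 *      user_field_size("long")         -> -EINVAL (ambiguous size)
 */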
static void user_event_destroy_validators(struct user_event *user)
{
        struct user_event_validator *validator, *next;
        struct list_head *head = &user->validators;

        list_for_each_entry_safe(validator, next, head, user_event_link) {
                list_del(&validator->user_event_link);
                kfree(validator);
        }
}

static void user_event_destroy_fields(struct user_event *user)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;

        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kfree(field);
        }
}
static int user_event_add_field(struct user_event *user, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct user_event_validator *validator;
        struct ftrace_event_field *field;
        int validator_flags = 0;

        field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);

        if (!field)
                return -ENOMEM;

        if (str_has_prefix(type, "__data_loc "))
                goto add_validator;

        if (str_has_prefix(type, "__rel_loc ")) {
                validator_flags |= VALIDATOR_REL;
                goto add_validator;
        }

        goto add_field;

add_validator:
        if (strstr(type, "char") != NULL)
                validator_flags |= VALIDATOR_ENSURE_NULL;

        validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);

        if (!validator) {
                kfree(field);
                return -ENOMEM;
        }

        validator->flags = validator_flags;
        validator->offset = offset;

        /* Want sequential access when validating */
        list_add_tail(&validator->user_event_link, &user->validators);

add_field:
        field->type = type;
        field->name = name;
        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;
        field->filter_type = filter_type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);

        list_add(&field->link, &user->fields);

        /*
         * Min size from user writes that are required, this does not include
         * the size of trace_entry (common fields).
         */
        user->min_size = (offset + size) - sizeof(struct trace_entry);

        return 0;
}
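
/*
 * Worked example (editorial, using the event from the earlier comment):
 * for "test char[20] msg;unsigned int id" the fields land at user data
 * offsets 0 and 20, each shifted by sizeof(struct trace_entry) in the
 * stored record. After the last field, min_size is
 * (offset + size) - sizeof(struct trace_entry) = 24, the smallest payload
 * a write may carry.
 */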
/*
 * Parses the values of a field within the description
 * Format: type name [size]
 */
static int user_event_parse_field(char *field, struct user_event *user,
                                  u32 *offset)
{
        char *part, *type, *name;
        u32 depth = 0, saved_offset = *offset;
        int len, size = -EINVAL;
        bool is_struct = false;

        field = skip_spaces(field);

        if (!*field)
                return 0;

        /* Handle types that have a space within */
        len = str_has_prefix(field, "unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "struct ");
        if (len) {
                is_struct = true;
                goto skip_next;
        }

        len = str_has_prefix(field, "__data_loc unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__data_loc ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__rel_loc unsigned ");
        if (len)
                goto skip_next;

        len = str_has_prefix(field, "__rel_loc ");
        if (len)
                goto skip_next;

        goto parse;
skip_next:
        type = field;
        field = strpbrk(field + len, " ");

        if (field == NULL)
                return -EINVAL;

        *field++ = '\0';
        depth++;
parse:
        name = NULL;

        while ((part = strsep(&field, " ")) != NULL) {
                switch (depth++) {
                case FIELD_DEPTH_TYPE:
                        type = part;
                        break;
                case FIELD_DEPTH_NAME:
                        name = part;
                        break;
                case FIELD_DEPTH_SIZE:
                        if (!is_struct)
                                return -EINVAL;

                        if (kstrtou32(part, 10, &size))
                                return -EINVAL;
                        break;
                default:
                        return -EINVAL;
                }
        }

        if (depth < FIELD_DEPTH_SIZE || !name)
                return -EINVAL;

        if (depth == FIELD_DEPTH_SIZE)
                size = user_field_size(type);

        if (size == 0)
                return -EINVAL;

        if (size < 0)
                return size;

        *offset = saved_offset + size;

        return user_event_add_field(user, type, name, saved_offset, size,
                                    type[0] != 'u', FILTER_OTHER);
}
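
/*
 * Editorial sketch of the depth state machine above: "struct my_type data 32"
 * walks FIELD_DEPTH_TYPE -> NAME -> SIZE, yielding type = "struct my_type",
 * name = "data" and an explicit 32-byte size, which structs require since
 * user_field_size() cannot derive it. A spec like "u32 id" stops at
 * FIELD_DEPTH_NAME and the size comes from user_field_size("u32").
 */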
static int user_event_parse_fields(struct user_event *user, char *args)
{
        char *field;
        u32 offset = sizeof(struct trace_entry);
        int ret = -EINVAL;

        while ((field = strsep(&args, ";")) != NULL) {
                ret = user_event_parse_field(field, user, &offset);

                if (ret)
                        break;
        }

        return ret;
}

static struct trace_event_fields user_event_fields_array[1];
static const char *user_field_format(const char *type)
{
        if (strcmp(type, "s64") == 0)
                return "%lld";
        if (strcmp(type, "u64") == 0)
                return "%llu";
        if (strcmp(type, "s32") == 0)
                return "%d";
        if (strcmp(type, "u32") == 0)
                return "%u";
        if (strcmp(type, "int") == 0)
                return "%d";
        if (strcmp(type, "unsigned int") == 0)
                return "%u";
        if (strcmp(type, "s16") == 0)
                return "%d";
        if (strcmp(type, "u16") == 0)
                return "%u";
        if (strcmp(type, "short") == 0)
                return "%d";
        if (strcmp(type, "unsigned short") == 0)
                return "%u";
        if (strcmp(type, "s8") == 0)
                return "%d";
        if (strcmp(type, "u8") == 0)
                return "%u";
        if (strcmp(type, "char") == 0)
                return "%d";
        if (strcmp(type, "unsigned char") == 0)
                return "%u";
        if (strstr(type, "char[") != NULL)
                return "%s";

        /* Unknown, likely struct, allowed; treat as 64-bit */
        return "%llu";
}
static bool user_field_is_dyn_string(const char *type, const char **str_func)
{
        if (str_has_prefix(type, "__data_loc ")) {
                *str_func = "__get_str";
                goto check;
        }

        if (str_has_prefix(type, "__rel_loc ")) {
                *str_func = "__get_rel_str";
                goto check;
        }

        return false;
check:
        return strstr(type, "char") != NULL;
}
#define LEN_OR_ZERO (len ? len - pos : 0)
static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
                                     char *buf, int len, bool *colon)
{
        int pos = 0, i = *iout;

        *colon = false;

        for (; i < argc; ++i) {
                if (i != *iout)
                        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);

                if (strchr(argv[i], ';')) {
                        ++i;
                        *colon = true;
                        break;
                }
        }

        /* Actual set, advance i */
        if (len != 0)
                *iout = i;

        return pos + 1;
}

static int user_field_set_string(struct ftrace_event_field *field,
                                 char *buf, int len, bool colon)
{
        int pos = 0;

        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
        pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);

        if (colon)
                pos += snprintf(buf + pos, LEN_OR_ZERO, ";");

        return pos + 1;
}
static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;
        int pos = 0, depth = 0;
        const char *str_func;

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        list_for_each_entry_safe_reverse(field, next, head, link) {
                if (depth != 0)
                        pos += snprintf(buf + pos, LEN_OR_ZERO, " ");

                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
                                field->name, user_field_format(field->type));

                depth++;
        }

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        list_for_each_entry_safe_reverse(field, next, head, link) {
                if (user_field_is_dyn_string(field->type, &str_func))
                        pos += snprintf(buf + pos, LEN_OR_ZERO,
                                        ", %s(%s)", str_func, field->name);
                else
                        pos += snprintf(buf + pos, LEN_OR_ZERO,
                                        ", REC->%s", field->name);
        }

        return pos + 1;
}
static int user_event_create_print_fmt(struct user_event *user)
{
        char *print_fmt;
        int len;

        len = user_event_set_print_fmt(user, NULL, 0);

        print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);

        if (!print_fmt)
                return -ENOMEM;

        user_event_set_print_fmt(user, print_fmt, len);

        user->call.print_fmt = print_fmt;

        return 0;
}
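
/*
 * Example output (editorial, derived from the builders above): for
 * "test char[20] msg;unsigned int id" the generated print_fmt is:
 *
 *      "msg=%s id=%u", REC->msg, REC->id
 */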
static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
                                                int flags,
                                                struct trace_event *event)
{
        return print_event_fields(iter, event);
}

static struct trace_event_functions user_event_funcs = {
        .trace = user_event_print_trace,
};
static int user_event_set_call_visible(struct user_event *user, bool visible)
{
        int ret;
        struct cred *cred;
        const struct cred *old_cred;

        cred = prepare_creds();

        if (!cred)
                return -ENOMEM;

        /*
         * While by default tracefs is locked down, systems can be configured
         * to allow user_event files to be less locked down. The extreme case
         * being "other" has read/write access to user_events_data/status.
         *
         * When not locked down, processes may not have permissions to
         * add/remove calls themselves to tracefs. We need to temporarily
         * switch to root file permission to allow for this scenario.
         */
        cred->fsuid = GLOBAL_ROOT_UID;

        old_cred = override_creds(cred);

        if (visible)
                ret = trace_add_event_call(&user->call);
        else
                ret = trace_remove_event_call(&user->call);

        revert_creds(old_cred);
        put_cred(cred);

        return ret;
}
static int destroy_user_event(struct user_event *user)
{
        int ret = 0;

        lockdep_assert_held(&event_mutex);

        /* Must destroy fields before call removal */
        user_event_destroy_fields(user);

        ret = user_event_set_call_visible(user, false);

        if (ret)
                return ret;

        dyn_event_remove(&user->devent);
        hash_del(&user->node);

        user_event_destroy_validators(user);
        kfree(user->call.print_fmt);
        kfree(EVENT_NAME(user));
        kfree(user);

        if (current_user_events > 0)
                current_user_events--;
        else
                pr_alert("BUG: Bad current_user_events\n");

        return ret;
}
static struct user_event *find_user_event(struct user_event_group *group,
                                          char *name, u32 *outkey)
{
        struct user_event *user;
        u32 key = user_event_key(name);

        *outkey = key;

        hash_for_each_possible(group->register_table, user, node, key)
                if (!strcmp(EVENT_NAME(user), name)) {
                        refcount_inc(&user->refcnt);
                        return user;
                }

        return NULL;
}
static int user_event_validate(struct user_event *user, void *data, int len)
{
        struct list_head *head = &user->validators;
        struct user_event_validator *validator;
        void *pos, *end = data + len;
        u32 loc, offset, size;

        list_for_each_entry(validator, head, user_event_link) {
                pos = data + validator->offset;

                /* Already done min_size check, no bounds check here */
                loc = *(u32 *)pos;
                offset = loc & 0xffff;
                size = loc >> 16;

                if (likely(validator->flags & VALIDATOR_REL))
                        pos += offset + sizeof(loc);
                else
                        pos = data + offset;

                pos += size;

                if (unlikely(pos > end))
                        return -EFAULT;

                if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
                        if (unlikely(*(char *)(pos - 1) != '\0'))
                                return -EFAULT;
        }

        return 0;
}
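
/*
 * Editorial sketch of the layout being validated above: a dyn-string field
 * stores a u32 'loc' whose upper 16 bits are the data size and lower 16
 * bits the offset, i.e. loc = (size << 16) | offset. For __rel_loc
 * (VALIDATOR_REL) the offset is relative to the byte just after the loc
 * word itself, hence 'pos += offset + sizeof(loc)'. VALIDATOR_ENSURE_NULL
 * then requires the final byte of the region to be '\0'.
 */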
/*
 * Writes the user supplied payload out to a trace file.
 */
static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
                              void *tpdata, bool *faulted)
{
        struct trace_event_file *file;
        struct trace_entry *entry;
        struct trace_event_buffer event_buffer;
        size_t size = sizeof(*entry) + i->count;

        file = (struct trace_event_file *)tpdata;

        if (!file ||
            !(file->flags & EVENT_FILE_FL_ENABLED) ||
            trace_trigger_soft_disabled(file))
                return;

        /* Allocates and fills trace_entry, + 1 of this is data payload */
        entry = trace_event_buffer_reserve(&event_buffer, file, size);

        if (unlikely(!entry))
                return;

        if (unlikely(!copy_nofault(entry + 1, i->count, i)))
                goto discard;

        if (!list_empty(&user->validators) &&
            unlikely(user_event_validate(user, entry, size)))
                goto discard;

        trace_event_buffer_commit(&event_buffer);

        return;
discard:
        *faulted = true;
        __trace_event_discard_commit(event_buffer.buffer,
                                     event_buffer.event);
}
#ifdef CONFIG_PERF_EVENTS
/*
 * Writes the user supplied payload out to perf ring buffer.
 */
static void user_event_perf(struct user_event *user, struct iov_iter *i,
                            void *tpdata, bool *faulted)
{
        struct hlist_head *perf_head;

        perf_head = this_cpu_ptr(user->call.perf_events);

        if (perf_head && !hlist_empty(perf_head)) {
                struct trace_entry *perf_entry;
                struct pt_regs *regs;
                size_t size = sizeof(*perf_entry) + i->count;
                int context;

                perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
                                                  &regs, &context);

                if (unlikely(!perf_entry))
                        return;

                perf_fetch_caller_regs(regs);

                if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
                        goto discard;

                if (!list_empty(&user->validators) &&
                    unlikely(user_event_validate(user, perf_entry, size)))
                        goto discard;

                perf_trace_buf_submit(perf_entry, size, context,
                                      user->call.event.type, 1, regs,
                                      NULL, NULL);

                return;
discard:
                *faulted = true;
                perf_swevent_put_recursion_context(context);
        }
}
#endif
/*
 * Update the enabled bit among all user processes.
 */
static void update_enable_bit_for(struct user_event *user)
{
        struct tracepoint *tp = &user->tracepoint;
        char status = 0;

        if (atomic_read(&tp->key.enabled) > 0) {
                struct tracepoint_func *probe_func_ptr;
                user_event_func_t probe_func;

                rcu_read_lock_sched();

                probe_func_ptr = rcu_dereference_sched(tp->funcs);

                if (probe_func_ptr) {
                        do {
                                probe_func = probe_func_ptr->func;

                                if (probe_func == user_event_ftrace)
                                        status |= EVENT_STATUS_FTRACE;
#ifdef CONFIG_PERF_EVENTS
                                else if (probe_func == user_event_perf)
                                        status |= EVENT_STATUS_PERF;
#endif
                                else
                                        status |= EVENT_STATUS_OTHER;
                        } while ((++probe_func_ptr)->func);
                }

                rcu_read_unlock_sched();
        }

        user->status = status;

        user_event_enabler_update(user);
}
/*
 * Register callback for our events from tracing sub-systems.
 */
static int user_event_reg(struct trace_event_call *call,
                          enum trace_reg type,
                          void *data)
{
        struct user_event *user = (struct user_event *)call->data;
        int ret = 0;

        if (!user)
                return -ENOENT;

        switch (type) {
        case TRACE_REG_REGISTER:
                ret = tracepoint_probe_register(call->tp,
                                                call->class->probe,
                                                data);
                if (!ret)
                        goto inc;
                break;

        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            data);
                goto dec;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                ret = tracepoint_probe_register(call->tp,
                                                call->class->perf_probe,
                                                data);
                if (!ret)
                        goto inc;
                break;

        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            data);
                goto dec;

        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                break;
#endif
        }

        return ret;
inc:
        refcount_inc(&user->refcnt);
        update_enable_bit_for(user);
        return 0;
dec:
        update_enable_bit_for(user);
        refcount_dec(&user->refcnt);
        return 0;
}
static int user_event_create(const char *raw_command)
{
        struct user_event_group *group;
        struct user_event *user;
        char *name;
        int ret;

        if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
                return -ECANCELED;

        raw_command += USER_EVENTS_PREFIX_LEN;
        raw_command = skip_spaces(raw_command);

        name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);

        if (!name)
                return -ENOMEM;

        group = current_user_event_group();

        if (!group) {
                kfree(name);
                return -ENOENT;
        }

        mutex_lock(&group->reg_mutex);

        ret = user_event_parse_cmd(group, name, &user);

        if (!ret)
                refcount_dec(&user->refcnt);

        mutex_unlock(&group->reg_mutex);

        if (ret)
                kfree(name);

        return ret;
}
static int user_event_show(struct seq_file *m, struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);
        struct ftrace_event_field *field, *next;
        struct list_head *head;
        int depth = 0;

        seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));

        head = trace_get_fields(&user->call);

        list_for_each_entry_safe_reverse(field, next, head, link) {
                if (depth == 0)
                        seq_puts(m, " ");
                else
                        seq_puts(m, "; ");

                seq_printf(m, "%s %s", field->type, field->name);

                if (str_has_prefix(field->type, "struct "))
                        seq_printf(m, " %d", field->size);

                depth++;
        }

        seq_puts(m, "\n");

        return 0;
}
static bool user_event_is_busy(struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);

        return !user_event_last_ref(user);
}

static int user_event_free(struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);

        if (!user_event_last_ref(user))
                return -EBUSY;

        return destroy_user_event(user);
}
static bool user_field_match(struct ftrace_event_field *field, int argc,
                             const char **argv, int *iout)
{
        char *field_name = NULL, *dyn_field_name = NULL;
        bool colon = false, match = false;
        int dyn_len, len;

        dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
                                            0, &colon);

        len = user_field_set_string(field, field_name, 0, colon);

        if (dyn_len != len)
                return false;

        dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
        field_name = kmalloc(len, GFP_KERNEL);

        if (!dyn_field_name || !field_name)
                goto out;

        user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
                                  dyn_len, &colon);

        user_field_set_string(field, field_name, len, colon);

        match = strcmp(dyn_field_name, field_name) == 0;
out:
        kfree(dyn_field_name);
        kfree(field_name);

        return match;
}
static bool user_fields_match(struct user_event *user, int argc,
                              const char **argv)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head = &user->fields;
        int i = 0;

        list_for_each_entry_safe_reverse(field, next, head, link)
                if (!user_field_match(field, argc, argv, &i))
                        return false;

        if (i != argc)
                return false;

        return true;
}
static bool user_event_match(const char *system, const char *event,
                             int argc, const char **argv, struct dyn_event *ev)
{
        struct user_event *user = container_of(ev, struct user_event, devent);
        bool match;

        match = strcmp(EVENT_NAME(user), event) == 0 &&
                (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);

        if (match && argc > 0)
                match = user_fields_match(user, argc, argv);

        return match;
}

static struct dyn_event_operations user_event_dops = {
        .create = user_event_create,
        .show = user_event_show,
        .is_busy = user_event_is_busy,
        .free = user_event_free,
        .match = user_event_match,
};
static int user_event_trace_register(struct user_event *user)
{
        int ret;

        ret = register_trace_event(&user->call.event);

        if (!ret)
                return -ENODEV;

        ret = user_event_set_call_visible(user, true);

        if (ret)
                unregister_trace_event(&user->call.event);

        return ret;
}
/*
 * Parses the event name, arguments and flags then registers if successful.
 * The name buffer lifetime is owned by this method for success cases only.
 * Upon success the returned user_event has its ref count increased by 1.
 */
static int user_event_parse(struct user_event_group *group, char *name,
                            char *args, char *flags,
                            struct user_event **newuser)
{
        int ret;
        u32 key;
        struct user_event *user;

        /* Prevent dyn_event from racing */
        mutex_lock(&event_mutex);
        user = find_user_event(group, name, &key);
        mutex_unlock(&event_mutex);

        if (user) {
                *newuser = user;
                /*
                 * Name is allocated by caller, free it since it already exists.
                 * Caller only worries about failure cases for freeing.
                 */
                kfree(name);
                return 0;
        }

        user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);

        if (!user)
                return -ENOMEM;

        INIT_LIST_HEAD(&user->class.fields);
        INIT_LIST_HEAD(&user->fields);
        INIT_LIST_HEAD(&user->validators);

        user->group = group;
        user->tracepoint.name = name;

        ret = user_event_parse_fields(user, args);

        if (ret)
                goto put_user;

        ret = user_event_create_print_fmt(user);

        if (ret)
                goto put_user;

        user->call.data = user;
        user->call.class = &user->class;
        user->call.name = name;
        user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
        user->call.tp = &user->tracepoint;
        user->call.event.funcs = &user_event_funcs;
        user->class.system = group->system_name;

        user->class.fields_array = user_event_fields_array;
        user->class.get_fields = user_event_get_fields;
        user->class.reg = user_event_reg;
        user->class.probe = user_event_ftrace;
#ifdef CONFIG_PERF_EVENTS
        user->class.perf_probe = user_event_perf;
#endif

        mutex_lock(&event_mutex);

        if (current_user_events >= max_user_events) {
                ret = -EMFILE;
                goto put_user_lock;
        }

        ret = user_event_trace_register(user);

        if (ret)
                goto put_user_lock;

        /* Ensure we track self ref and caller ref (2) */
        refcount_set(&user->refcnt, 2);

        dyn_event_init(&user->devent, &user_event_dops);
        dyn_event_add(&user->devent, &user->call);
        hash_add(group->register_table, &user->node, key);
        current_user_events++;

        mutex_unlock(&event_mutex);

        *newuser = user;
        return 0;
put_user_lock:
        mutex_unlock(&event_mutex);
put_user:
        user_event_destroy_fields(user);
        user_event_destroy_validators(user);
        kfree(user->call.print_fmt);
        kfree(user);
        return ret;
}
/*
 * Deletes a previously created event if it is no longer being used.
 */
static int delete_user_event(struct user_event_group *group, char *name)
{
        u32 key;
        struct user_event *user = find_user_event(group, name, &key);

        if (!user)
                return -ENOENT;

        refcount_dec(&user->refcnt);

        if (!user_event_last_ref(user))
                return -EBUSY;

        return destroy_user_event(user);
}
/*
 * Validates the user payload and writes via iterator.
 */
static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
{
        struct user_event_file_info *info = file->private_data;
        struct user_event_refs *refs;
        struct user_event *user = NULL;
        struct tracepoint *tp;
        ssize_t ret = i->count;
        int idx;

        if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
                return -EFAULT;

        if (idx < 0)
                return -EINVAL;

        rcu_read_lock_sched();

        refs = rcu_dereference_sched(info->refs);

        /*
         * The refs->events array is protected by RCU, and new items may be
         * added. But the user retrieved from indexing into the events array
         * shall be immutable while the file is opened.
         */
        if (likely(refs && idx < refs->count))
                user = refs->events[idx];

        rcu_read_unlock_sched();

        if (unlikely(user == NULL))
                return -ENOENT;

        if (unlikely(i->count < user->min_size))
                return -EINVAL;

        tp = &user->tracepoint;

        /*
         * It's possible that key.enabled flips to disabled after this check;
         * we don't mind if a few events are included in this condition.
         */
        if (likely(atomic_read(&tp->key.enabled) > 0)) {
                struct tracepoint_func *probe_func_ptr;
                user_event_func_t probe_func;
                struct iov_iter copy;
                void *tpdata;
                bool faulted;

                if (unlikely(fault_in_iov_iter_readable(i, i->count)))
                        return -EFAULT;

                faulted = false;

                rcu_read_lock_sched();

                probe_func_ptr = rcu_dereference_sched(tp->funcs);

                if (probe_func_ptr) {
                        do {
                                copy = *i;
                                probe_func = probe_func_ptr->func;
                                tpdata = probe_func_ptr->data;
                                probe_func(user, &copy, tpdata, &faulted);
                        } while ((++probe_func_ptr)->func);
                }

                rcu_read_unlock_sched();

                if (unlikely(faulted))
                        return -EFAULT;
        } else
                return -EBADF;

        return ret;
}
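
/*
 * Illustrative user-space write (editorial sketch; names hypothetical and
 * the layout assumes no compiler padding): every write begins with the
 * 4-byte write_index returned by DIAG_IOCSREG, followed by the raw payload
 * the registered fields describe:
 *
 *      struct {
 *              __u32 write_index;
 *              char msg[20];
 *              __u32 id;
 *      } rec = { .write_index = idx, .msg = "hello", .id = 1 };
 *
 *      if (write(data_fd, &rec, sizeof(rec)) != sizeof(rec))
 *              perror("write");
 */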
static int user_events_open(struct inode *node, struct file *file)
{
        struct user_event_group *group;
        struct user_event_file_info *info;

        group = current_user_event_group();

        if (!group)
                return -ENOENT;

        info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);

        if (!info)
                return -ENOMEM;

        info->group = group;

        file->private_data = info;

        return 0;
}

static ssize_t user_events_write(struct file *file, const char __user *ubuf,
                                 size_t count, loff_t *ppos)
{
        struct iovec iov;
        struct iov_iter i;

        if (unlikely(*ppos != 0))
                return -EFAULT;

        if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
                                         count, &iov, &i)))
                return -EFAULT;

        return user_events_write_core(file, &i);
}

static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
{
        return user_events_write_core(kp->ki_filp, i);
}
static int user_events_ref_add(struct user_event_file_info *info,
                               struct user_event *user)
{
        struct user_event_group *group = info->group;
        struct user_event_refs *refs, *new_refs;
        int i, size, count = 0;

        refs = rcu_dereference_protected(info->refs,
                                         lockdep_is_held(&group->reg_mutex));

        if (refs) {
                count = refs->count;

                for (i = 0; i < count; ++i)
                        if (refs->events[i] == user)
                                return i;
        }

        size = struct_size(refs, events, count + 1);

        new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);

        if (!new_refs)
                return -ENOMEM;

        new_refs->count = count + 1;

        for (i = 0; i < count; ++i)
                new_refs->events[i] = refs->events[i];

        new_refs->events[i] = user;

        refcount_inc(&user->refcnt);

        rcu_assign_pointer(info->refs, new_refs);

        if (refs)
                kfree_rcu(refs, rcu);

        return i;
}
static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
{
        u32 size;
        long ret;

        ret = get_user(size, &ureg->size);

        if (ret)
                return ret;

        if (size > PAGE_SIZE)
                return -E2BIG;

        if (size < offsetofend(struct user_reg, write_index))
                return -EINVAL;

        ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

        if (ret)
                return ret;

        /* Ensure no flags, since we don't support any yet */
        if (kreg->flags != 0)
                return -EINVAL;

        /* Ensure supported size */
        switch (kreg->enable_size) {
        case 4:
                /* 32-bit */
                break;
#if BITS_PER_LONG >= 64
        case 8:
                /* 64-bit */
                break;
#endif
        default:
                return -EINVAL;
        }

        /* Ensure natural alignment */
        if (kreg->enable_addr % kreg->enable_size)
                return -EINVAL;

        /* Ensure bit range for size */
        if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
                return -EINVAL;

        /* Ensure accessible */
        if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
                       kreg->enable_size))
                return -EFAULT;

        return 0;
}
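
/*
 * Examples of the checks above (editorial): with enable_size = 4 the
 * enable_addr must be 4-byte aligned and enable_bit may be 0-31; an
 * 8-byte enable_size allows bits 0-63 but is only accepted on kernels
 * where BITS_PER_LONG >= 64.
 */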
/*
 * Registers a user_event on behalf of a user process.
 */
static long user_events_ioctl_reg(struct user_event_file_info *info,
                                  unsigned long uarg)
{
        struct user_reg __user *ureg = (struct user_reg __user *)uarg;
        struct user_reg reg;
        struct user_event *user;
        struct user_event_enabler *enabler;
        char *name;
        long ret;
        int write_result;

        ret = user_reg_get(ureg, &reg);

        if (ret)
                return ret;

        /*
         * Prevent users from using the same address and bit multiple times
         * within the same mm address space. This can cause unexpected behavior
         * for user processes that is far easier to debug if this is explicitly
         * an error upon registering.
         */
        if (current_user_event_enabler_exists((unsigned long)reg.enable_addr,
                                              reg.enable_bit))
                return -EADDRINUSE;

        name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
                            MAX_EVENT_DESC);

        if (IS_ERR(name)) {
                ret = PTR_ERR(name);
                return ret;
        }

        ret = user_event_parse_cmd(info->group, name, &user);

        if (ret) {
                kfree(name);
                return ret;
        }

        ret = user_events_ref_add(info, user);

        /* No longer need parse ref, ref_add either worked or not */
        refcount_dec(&user->refcnt);

        /* Positive number is index and valid */
        if (ret < 0)
                return ret;

        /*
         * user_events_ref_add succeeded:
         * At this point we have a user_event, its lifetime is bound by the
         * reference count, not this file. If anything fails, the user_event
         * still has a reference until the file is released. During release
         * any remaining references (from user_events_ref_add) are decremented.
         *
         * Attempt to create an enabler, which too has a lifetime tied in the
         * same way for the event. Once the task that caused the enabler to be
         * created exits or issues exec() then the enablers it has created
         * will be destroyed and the ref to the event will be decremented.
         */
        enabler = user_event_enabler_create(&reg, user, &write_result);

        if (!enabler)
                return -ENOMEM;

        /* Write failed/faulted, give error back to caller */
        if (write_result)
                return write_result;

        put_user((u32)ret, &ureg->write_index);

        return 0;
}
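
/*
 * End-to-end registration sketch from user space (editorial; error
 * handling elided, and 'enabled' must stay valid for the process lifetime):
 *
 *      static __u32 enabled;   // kernel sets bit 0 while tracing is on
 *
 *      struct user_reg reg = {
 *              .size = sizeof(reg),
 *              .enable_bit = 0,
 *              .enable_size = sizeof(enabled),
 *              .enable_addr = (__u64)(uintptr_t)&enabled,
 *              .name_args = (__u64)(uintptr_t)"test char[20] msg;unsigned int id",
 *      };
 *
 *      int fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *      ioctl(fd, DIAG_IOCSREG, &reg);  // on success reg.write_index is set
 */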
/*
 * Deletes a user_event on behalf of a user process.
 */
static long user_events_ioctl_del(struct user_event_file_info *info,
                                  unsigned long uarg)
{
        void __user *ubuf = (void __user *)uarg;
        char *name;
        long ret;

        name = strndup_user(ubuf, MAX_EVENT_DESC);

        if (IS_ERR(name))
                return PTR_ERR(name);

        /* event_mutex prevents dyn_event from racing */
        mutex_lock(&event_mutex);
        ret = delete_user_event(info->group, name);
        mutex_unlock(&event_mutex);

        kfree(name);

        return ret;
}
static long user_unreg_get(struct user_unreg __user *ureg,
                           struct user_unreg *kreg)
{
        u32 size;
        long ret;

        ret = get_user(size, &ureg->size);

        if (ret)
                return ret;

        if (size > PAGE_SIZE)
                return -E2BIG;

        if (size < offsetofend(struct user_unreg, disable_addr))
                return -EINVAL;

        ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);

        /* Ensure no reserved values, since we don't support any yet */
        if (kreg->__reserved || kreg->__reserved2)
                return -EINVAL;

        return ret;
}
static int user_event_mm_clear_bit(struct user_event_mm *user_mm,
                                   unsigned long uaddr, unsigned char bit)
{
        struct user_event_enabler enabler;
        int result;
        int attempt = 0;

        memset(&enabler, 0, sizeof(enabler));
        enabler.addr = uaddr;
        enabler.values = bit;
retry:
        /* Prevents state changes from racing with new enablers */
        mutex_lock(&event_mutex);

        /* Force the bit to be cleared, since no event is attached */
        mmap_read_lock(user_mm->mm);
        result = user_event_enabler_write(user_mm, &enabler, false, &attempt);
        mmap_read_unlock(user_mm->mm);

        mutex_unlock(&event_mutex);

        if (result) {
                /* Attempt to fault-in and retry if it worked */
                if (!user_event_mm_fault_in(user_mm, uaddr, attempt))
                        goto retry;
        }

        return result;
}
/*
 * Unregisters an enablement address/bit within a task/user mm.
 */
static long user_events_ioctl_unreg(unsigned long uarg)
{
        struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
        struct user_event_mm *mm = current->user_event_mm;
        struct user_event_enabler *enabler, *next;
        struct user_unreg reg;
        long ret;

        ret = user_unreg_get(ureg, &reg);

        if (ret)
                return ret;

        if (!mm)
                return -ENOENT;

        ret = -ENOENT;

        /*
         * Flags freeing and faulting are used to indicate if the enabler is in
         * use at all. When faulting is set a page-fault is occurring
         * asynchronously. During async fault if freeing is set, the enabler
         * will be destroyed. If no async fault is happening, we can destroy it
         * now since we hold the event_mutex during these checks.
         */
        mutex_lock(&event_mutex);

        list_for_each_entry_safe(enabler, next, &mm->enablers, mm_enablers_link) {
                if (enabler->addr == reg.disable_addr &&
                    ENABLE_BIT(enabler) == reg.disable_bit) {
                        set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));

                        if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
                                user_event_enabler_destroy(enabler);

                        /* Removed at least one */
                        ret = 0;
                }
        }

        mutex_unlock(&event_mutex);

        /* Ensure bit is now cleared for user, regardless of event status */
        if (!ret)
                ret = user_event_mm_clear_bit(mm, reg.disable_addr,
                                              reg.disable_bit);

        return ret;
}
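
/*
 * Matching unregister sketch (editorial, continuing the registration
 * example above):
 *
 *      struct user_unreg unreg = {
 *              .size = sizeof(unreg),
 *              .disable_bit = 0,
 *              .disable_addr = (__u64)(uintptr_t)&enabled,
 *      };
 *
 *      ioctl(fd, DIAG_IOCSUNREG, &unreg);      // bit 0 is force-cleared
 */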
/*
 * Handles the ioctl from user mode to register or alter operations.
 */
static long user_events_ioctl(struct file *file, unsigned int cmd,
                              unsigned long uarg)
{
        struct user_event_file_info *info = file->private_data;
        struct user_event_group *group = info->group;
        long ret = -ENOTTY;

        switch (cmd) {
        case DIAG_IOCSREG:
                mutex_lock(&group->reg_mutex);
                ret = user_events_ioctl_reg(info, uarg);
                mutex_unlock(&group->reg_mutex);
                break;

        case DIAG_IOCSDEL:
                mutex_lock(&group->reg_mutex);
                ret = user_events_ioctl_del(info, uarg);
                mutex_unlock(&group->reg_mutex);
                break;

        case DIAG_IOCSUNREG:
                mutex_lock(&group->reg_mutex);
                ret = user_events_ioctl_unreg(uarg);
                mutex_unlock(&group->reg_mutex);
                break;
        }

        return ret;
}
/*
 * Handles the final close of the file from user mode.
 */
static int user_events_release(struct inode *node, struct file *file)
{
        struct user_event_file_info *info = file->private_data;
        struct user_event_group *group;
        struct user_event_refs *refs;
        struct user_event *user;
        int i;

        if (!info)
                return -EINVAL;

        group = info->group;

        /*
         * Ensure refs cannot change under any situation by taking the
         * register mutex during the final freeing of the references.
         */
        mutex_lock(&group->reg_mutex);

        refs = info->refs;

        if (!refs)
                goto out;

        /*
         * The lifetime of refs has reached an end, it's tied to this file.
         * The underlying user_events are ref counted, and cannot be freed.
         * After this decrement, the user_events may be freed elsewhere.
         */
        for (i = 0; i < refs->count; ++i) {
                user = refs->events[i];

                if (user)
                        refcount_dec(&user->refcnt);
        }
out:
        file->private_data = NULL;

        mutex_unlock(&group->reg_mutex);

        kfree(refs);
        kfree(info);

        return 0;
}
static const struct file_operations user_data_fops = {
        .open = user_events_open,
        .write = user_events_write,
        .write_iter = user_events_write_iter,
        .unlocked_ioctl = user_events_ioctl,
        .release = user_events_release,
};
static void *user_seq_start(struct seq_file *m, loff_t *pos)
{
        if (*pos)
                return NULL;

        return (void *)1;
}

static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
        ++*pos;

        return NULL;
}

static void user_seq_stop(struct seq_file *m, void *p)
{
}
static int user_seq_show(struct seq_file *m, void *p)
{
        struct user_event_group *group = m->private;
        struct user_event *user;
        char status;
        int i, active = 0, busy = 0;

        if (!group)
                return -EINVAL;

        mutex_lock(&group->reg_mutex);

        hash_for_each(group->register_table, i, user, node) {
                status = user->status;

                seq_printf(m, "%s", EVENT_NAME(user));

                if (status != 0) {
                        seq_puts(m, " Used by");
                        if (status & EVENT_STATUS_FTRACE)
                                seq_puts(m, " ftrace");
                        if (status & EVENT_STATUS_PERF)
                                seq_puts(m, " perf");
                        if (status & EVENT_STATUS_OTHER)
                                seq_puts(m, " other");
                        busy++;
                }

                seq_puts(m, "\n");
                active++;
        }

        mutex_unlock(&group->reg_mutex);

        seq_puts(m, "\n");
        seq_printf(m, "Active: %d\n", active);
        seq_printf(m, "Busy: %d\n", busy);

        return 0;
}
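
/*
 * Example user_events_status output (editorial approximation): each
 * registered event is listed by name, suffixed with the probes attached
 * ("Used by ftrace", "perf" and/or "other"), followed by the totals
 * above, e.g. "Active: 1" and "Busy: 1".
 */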
static const struct seq_operations user_seq_ops = {
        .start = user_seq_start,
        .next = user_seq_next,
        .stop = user_seq_stop,
        .show = user_seq_show,
};
static int user_status_open(struct inode *node, struct file *file)
{
        struct user_event_group *group;
        int ret;

        group = current_user_event_group();

        if (!group)
                return -ENOENT;

        ret = seq_open(file, &user_seq_ops);

        if (!ret) {
                /* Chain group to seq_file */
                struct seq_file *m = file->private_data;

                m->private = group;
        }

        return ret;
}

static const struct file_operations user_status_fops = {
        .open = user_status_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = seq_release,
};
/*
 * Creates a set of tracefs files to allow user mode interactions.
 */
static int create_user_tracefs(void)
{
        struct dentry *edata, *emmap;

        edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
                                    NULL, NULL, &user_data_fops);

        if (!edata) {
                pr_warn("Could not create tracefs 'user_events_data' entry\n");
                goto err;
        }

        emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
                                    NULL, NULL, &user_status_fops);

        if (!emmap) {
                tracefs_remove(edata);
                pr_warn("Could not create tracefs 'user_events_status' entry\n");
                goto err;
        }

        return 0;
err:
        return -ENODEV;
}
static int set_max_user_events_sysctl(struct ctl_table *table, int write,
                                      void *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        mutex_lock(&event_mutex);

        ret = proc_douintvec(table, write, buffer, lenp, ppos);

        mutex_unlock(&event_mutex);

        return ret;
}

static struct ctl_table user_event_sysctls[] = {
        {
                .procname = "user_events_max",
                .data = &max_user_events,
                .maxlen = sizeof(unsigned int),
                .mode = 0644,
                .proc_handler = set_max_user_events_sysctl,
        },
        {}
};
static int __init trace_events_user_init(void)
{
        int ret;

        fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);

        if (!fault_cache)
                return -ENOMEM;

        init_group = user_event_group_create(&init_user_ns);

        if (!init_group) {
                kmem_cache_destroy(fault_cache);
                return -ENOMEM;
        }

        ret = create_user_tracefs();

        if (ret) {
                pr_warn("user_events could not register with tracefs\n");
                user_event_group_destroy(init_group);
                kmem_cache_destroy(fault_cache);
                return ret;
        }

        if (dyn_event_register(&user_event_dops))
                pr_warn("user_events could not register with dyn_events\n");

        register_sysctl_init("kernel", user_event_sysctls);

        return 0;
}

fs_initcall(trace_events_user_init);