1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2021, Microsoft Corporation.
6 * Beau Belgrave <beaub@linux.microsoft.com>
9 #include <linux/bitmap.h>
10 #include <linux/cdev.h>
11 #include <linux/hashtable.h>
12 #include <linux/list.h>
14 #include <linux/uio.h>
15 #include <linux/ioctl.h>
16 #include <linux/jhash.h>
17 #include <linux/refcount.h>
18 #include <linux/trace_events.h>
19 #include <linux/tracefs.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/highmem.h>
23 #include <linux/init.h>
24 #include <linux/user_events.h>
26 #include "trace_dynevent.h"
28 #define USER_EVENTS_PREFIX_LEN (sizeof(USER_EVENTS_PREFIX)-1)
30 #define FIELD_DEPTH_TYPE 0
31 #define FIELD_DEPTH_NAME 1
32 #define FIELD_DEPTH_SIZE 2
34 /* Limit how long an event name plus args can be within the subsystem. */
35 #define MAX_EVENT_DESC 512
36 #define EVENT_NAME(user_event) ((user_event)->tracepoint.name)
37 #define MAX_FIELD_ARRAY_SIZE 1024
40 * Internal bits (kernel side only) to keep track of connected probes:
41 * These are used when status is requested in text form about an event. These
42 * bits are compared against an internal byte on the event to determine which
43 * probes to print out to the user.
45 * These do not reflect the mapped bytes between the user and kernel space.
47 #define EVENT_STATUS_FTRACE BIT(0)
48 #define EVENT_STATUS_PERF BIT(1)
49 #define EVENT_STATUS_OTHER BIT(7)
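/*
 * Illustrative only: with a single ftrace probe attached to an event named
 * 'test', reading user_events_status produces text along the lines of:
 *
 *	test Used by ftrace
 *
 *	Active: 1
 *	Busy: 1
 *
 * The exact layout is emitted by user_seq_show() further below.
 */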
52 * Stores the system name, tables, and locks for a group of events. This
53 * allows isolation for events by various means.
55 struct user_event_group {
57 struct hlist_node node;
58 struct mutex reg_mutex;
59 DECLARE_HASHTABLE(register_table, 8);
62 /* Group for init_user_ns mapping, top-most group */
63 static struct user_event_group *init_group;
65 /* Max allowed events for the whole system */
66 static unsigned int max_user_events = 32768;
68 /* Current number of events on the whole system */
69 static unsigned int current_user_events;
72 * Stores per-event properties. As users register events
73 * within a file, a user_event might be created if it does not
74 * already exist. These are globally used and their lifetime
75 * is tied to the refcnt member. These cannot go away until the
79 struct user_event_group *group;
80 struct tracepoint tracepoint;
81 struct trace_event_call call;
82 struct trace_event_class class;
83 struct dyn_event devent;
84 struct hlist_node node;
85 struct list_head fields;
86 struct list_head validators;
93 * Stores per-mm/event properties that enable an address to be
94 * updated properly for each task. As tasks are forked, we use
95 * these to track enablement sites that are tied to an event.
97 struct user_event_enabler {
98 struct list_head link;
99 struct user_event *event;
102 /* Track enable bit, flags, etc. Aligned for bitops. */
106 /* Bits 0-5 are for the bit to update upon enable/disable (0-63 allowed) */
107 #define ENABLE_VAL_BIT_MASK 0x3F
109 /* Bit 6 is for faulting status of enablement */
110 #define ENABLE_VAL_FAULTING_BIT 6
112 /* Bit 7 is for freeing status of enablement */
113 #define ENABLE_VAL_FREEING_BIT 7
115 /* Only duplicate the bit value */
116 #define ENABLE_VAL_DUP_MASK ENABLE_VAL_BIT_MASK
118 #define ENABLE_BITOPS(e) ((unsigned long *)&(e)->values)
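/*
 * Illustrative only: for an enabler that tracks bit 31 of a 32-bit enable
 * value in user space, the low 6 bits of 'values' hold the bit index and
 * the kernel-only flag bits sit above them:
 *
 *	enabler->values = 31;
 *	set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
 *	test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
 *
 * ENABLE_VAL_DUP_MASK ensures only the bit index, not the kernel flags, is
 * copied when an enabler is duplicated across fork().
 */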
120 /* Used for asynchronous faulting in of pages */
121 struct user_event_enabler_fault {
122 struct work_struct work;
123 struct user_event_mm *mm;
124 struct user_event_enabler *enabler;
127 static struct kmem_cache *fault_cache;
129 /* Global list of memory descriptors using user_events */
130 static LIST_HEAD(user_event_mms);
131 static DEFINE_SPINLOCK(user_event_mms_lock);
134 * Stores per-file event references. As users register events
135 * within a file, this structure is modified and freed via RCU.
136 * The lifetime of this struct is tied to the lifetime of the file.
137 * These are not shared and only accessible by the file that created it.
139 struct user_event_refs {
142 struct user_event *events[];
145 struct user_event_file_info {
146 struct user_event_group *group;
147 struct user_event_refs *refs;
150 #define VALIDATOR_ENSURE_NULL (1 << 0)
151 #define VALIDATOR_REL (1 << 1)
153 struct user_event_validator {
154 struct list_head link;
159 typedef void (*user_event_func_t) (struct user_event *user, struct iov_iter *i,
160 void *tpdata, bool *faulted);
162 static int user_event_parse(struct user_event_group *group, char *name,
163 char *args, char *flags,
164 struct user_event **newuser);
166 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm);
167 static struct user_event_mm *user_event_mm_get_all(struct user_event *user);
168 static void user_event_mm_put(struct user_event_mm *mm);
170 static u32 user_event_key(char *name)
172 return jhash(name, strlen(name), 0);
175 static void user_event_group_destroy(struct user_event_group *group)
177 kfree(group->system_name);
181 static char *user_event_group_system_name(struct user_namespace *user_ns)
184 int len = sizeof(USER_EVENTS_SYSTEM) + 1;
186 if (user_ns != &init_user_ns) {
188 * Unexpected at this point:
189 * We only currently support init_user_ns.
190 * When we enable more, this will trigger a failure so log.
192 pr_warn("user_events: Namespace other than init_user_ns!\n");
196 system_name = kmalloc(len, GFP_KERNEL);
201 snprintf(system_name, len, "%s", USER_EVENTS_SYSTEM);
206 static inline struct user_event_group
207 *user_event_group_from_user_ns(struct user_namespace *user_ns)
209 if (user_ns == &init_user_ns)
215 static struct user_event_group *current_user_event_group(void)
217 struct user_namespace *user_ns = current_user_ns();
218 struct user_event_group *group = NULL;
221 group = user_event_group_from_user_ns(user_ns);
226 user_ns = user_ns->parent;
232 static struct user_event_group
233 *user_event_group_create(struct user_namespace *user_ns)
235 struct user_event_group *group;
237 group = kzalloc(sizeof(*group), GFP_KERNEL);
242 group->system_name = user_event_group_system_name(user_ns);
244 if (!group->system_name)
247 mutex_init(&group->reg_mutex);
248 hash_init(group->register_table);
253 user_event_group_destroy(group);
258 static void user_event_enabler_destroy(struct user_event_enabler *enabler)
260 list_del_rcu(&enabler->link);
262 /* No longer tracking the event via the enabler */
263 refcount_dec(&enabler->event->refcnt);
268 static int user_event_mm_fault_in(struct user_event_mm *mm, unsigned long uaddr)
273 mmap_read_lock(mm->mm);
275 /* Ensure MM has tasks, cannot use after exit_mm() */
276 if (refcount_read(&mm->tasks) == 0) {
281 ret = fixup_user_fault(mm->mm, uaddr, FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE,
284 mmap_read_unlock(mm->mm);
289 static int user_event_enabler_write(struct user_event_mm *mm,
290 struct user_event_enabler *enabler,
293 static void user_event_enabler_fault_fixup(struct work_struct *work)
295 struct user_event_enabler_fault *fault = container_of(
296 work, struct user_event_enabler_fault, work);
297 struct user_event_enabler *enabler = fault->enabler;
298 struct user_event_mm *mm = fault->mm;
299 unsigned long uaddr = enabler->addr;
302 ret = user_event_mm_fault_in(mm, uaddr);
304 if (ret && ret != -ENOENT) {
305 struct user_event *user = enabler->event;
307 pr_warn("user_events: Fault for mm: 0x%pK @ 0x%llx event: %s\n",
308 mm->mm, (unsigned long long)uaddr, EVENT_NAME(user));
311 /* Prevent state changes from racing */
312 mutex_lock(&event_mutex);
314 /* User asked for enabler to be removed during fault */
315 if (test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))) {
316 user_event_enabler_destroy(enabler);
321 * If we managed to get the page, re-issue the write. We do not
322 * want to get into a possible infinite loop, which is why we only
323 * attempt again directly if the page came in. If we couldn't get
324 * the page here, then we will try again the next time the event is
327 clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
330 mmap_read_lock(mm->mm);
331 user_event_enabler_write(mm, enabler, true);
332 mmap_read_unlock(mm->mm);
335 mutex_unlock(&event_mutex);
337 /* In all cases we no longer need the mm or fault */
338 user_event_mm_put(mm);
339 kmem_cache_free(fault_cache, fault);
342 static bool user_event_enabler_queue_fault(struct user_event_mm *mm,
343 struct user_event_enabler *enabler)
345 struct user_event_enabler_fault *fault;
347 fault = kmem_cache_zalloc(fault_cache, GFP_NOWAIT | __GFP_NOWARN);
352 INIT_WORK(&fault->work, user_event_enabler_fault_fixup);
353 fault->mm = user_event_mm_get(mm);
354 fault->enabler = enabler;
356 /* Don't try to queue in again while we have a pending fault */
357 set_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
359 if (!schedule_work(&fault->work)) {
360 /* Allow another attempt later */
361 clear_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler));
363 user_event_mm_put(mm);
364 kmem_cache_free(fault_cache, fault);
372 static int user_event_enabler_write(struct user_event_mm *mm,
373 struct user_event_enabler *enabler,
376 unsigned long uaddr = enabler->addr;
382 lockdep_assert_held(&event_mutex);
383 mmap_assert_locked(mm->mm);
385 /* Ensure MM has tasks, cannot use after exit_mm() */
386 if (refcount_read(&mm->tasks) == 0)
389 if (unlikely(test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)) ||
390 test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler))))
393 ret = pin_user_pages_remote(mm->mm, uaddr, 1, FOLL_WRITE | FOLL_NOFAULT,
396 if (unlikely(ret <= 0)) {
400 if (!user_event_enabler_queue_fault(mm, enabler))
401 pr_warn("user_events: Unable to queue fault handler\n");
406 kaddr = kmap_local_page(page);
407 ptr = kaddr + (uaddr & ~PAGE_MASK);
409 /* Update the bit atomically; user tracers must be atomic as well */
410 if (enabler->event && enabler->event->status)
411 set_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
413 clear_bit(enabler->values & ENABLE_VAL_BIT_MASK, ptr);
416 unpin_user_pages_dirty_lock(&page, 1, true);
421 static void user_event_enabler_update(struct user_event *user)
423 struct user_event_enabler *enabler;
424 struct user_event_mm *mm = user_event_mm_get_all(user);
425 struct user_event_mm *next;
429 mmap_read_lock(mm->mm);
432 list_for_each_entry_rcu(enabler, &mm->enablers, link)
433 if (enabler->event == user)
434 user_event_enabler_write(mm, enabler, true);
437 mmap_read_unlock(mm->mm);
438 user_event_mm_put(mm);
443 static bool user_event_enabler_dup(struct user_event_enabler *orig,
444 struct user_event_mm *mm)
446 struct user_event_enabler *enabler;
448 /* Skip pending frees */
449 if (unlikely(test_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(orig))))
452 enabler = kzalloc(sizeof(*enabler), GFP_NOWAIT | __GFP_ACCOUNT);
457 enabler->event = orig->event;
458 enabler->addr = orig->addr;
460 /* Only dup part of value (ignore future flags, etc) */
461 enabler->values = orig->values & ENABLE_VAL_DUP_MASK;
463 refcount_inc(&enabler->event->refcnt);
464 list_add_rcu(&enabler->link, &mm->enablers);
469 static struct user_event_mm *user_event_mm_get(struct user_event_mm *mm)
471 refcount_inc(&mm->refcnt);
476 static struct user_event_mm *user_event_mm_get_all(struct user_event *user)
478 struct user_event_mm *found = NULL;
479 struct user_event_enabler *enabler;
480 struct user_event_mm *mm;
483 * We do not want to block fork/exec while enablements are being
484 * updated, so we use RCU to walk the current tasks that have used
485 * the user_events ABI for one or more events. Each enabler found in each
486 * task that matches the event being updated has a write to reflect
487 * the kernel state back into the process. Waits/faults must not occur
488 * during this. So we scan the list under RCU for all the mms that have
489 * the event within them. This is needed because mmap_read_lock() can wait.
490 * Each user mm returned has a ref inc to handle remove RCU races.
494 list_for_each_entry_rcu(mm, &user_event_mms, link)
495 list_for_each_entry_rcu(enabler, &mm->enablers, link)
496 if (enabler->event == user) {
498 found = user_event_mm_get(mm);
507 static struct user_event_mm *user_event_mm_create(struct task_struct *t)
509 struct user_event_mm *user_mm;
512 user_mm = kzalloc(sizeof(*user_mm), GFP_KERNEL_ACCOUNT);
518 INIT_LIST_HEAD(&user_mm->enablers);
519 refcount_set(&user_mm->refcnt, 1);
520 refcount_set(&user_mm->tasks, 1);
522 spin_lock_irqsave(&user_event_mms_lock, flags);
523 list_add_rcu(&user_mm->link, &user_event_mms);
524 spin_unlock_irqrestore(&user_event_mms_lock, flags);
526 t->user_event_mm = user_mm;
529 * The lifetime of the memory descriptor can slightly outlast
530 * the task lifetime if a ref to the user_event_mm is taken
531 * between list_del_rcu() and call_rcu(). Therefore we need
532 * to take a reference to it to ensure it can live this long
533 * under this corner case. This can also occur in clones that
534 * outlast the parent.
541 static struct user_event_mm *current_user_event_mm(void)
543 struct user_event_mm *user_mm = current->user_event_mm;
548 user_mm = user_event_mm_create(current);
553 refcount_inc(&user_mm->refcnt);
558 static void user_event_mm_destroy(struct user_event_mm *mm)
560 struct user_event_enabler *enabler, *next;
562 list_for_each_entry_safe(enabler, next, &mm->enablers, link)
563 user_event_enabler_destroy(enabler);
569 static void user_event_mm_put(struct user_event_mm *mm)
571 if (mm && refcount_dec_and_test(&mm->refcnt))
572 user_event_mm_destroy(mm);
575 static void delayed_user_event_mm_put(struct work_struct *work)
577 struct user_event_mm *mm;
579 mm = container_of(to_rcu_work(work), struct user_event_mm, put_rwork);
580 user_event_mm_put(mm);
583 void user_event_mm_remove(struct task_struct *t)
585 struct user_event_mm *mm;
590 mm = t->user_event_mm;
591 t->user_event_mm = NULL;
593 /* Clone will increment the tasks, only remove if last clone */
594 if (!refcount_dec_and_test(&mm->tasks))
597 /* Remove the mm from the list, so it can no longer be enabled */
598 spin_lock_irqsave(&user_event_mms_lock, flags);
599 list_del_rcu(&mm->link);
600 spin_unlock_irqrestore(&user_event_mms_lock, flags);
603 * We need to wait for currently occurring writes to stop within
604 * the mm. This is required since exit_mm() snaps the current rss
605 * stats and clears them. On the final mmdrop(), check_mm() will
606 * report a bug if these increment.
608 * All writes/pins are done under the mmap_read lock; take the write
609 * lock to ensure in-progress faults have completed. Faults that
610 * are pending but yet to run will check the task count and skip
611 * the fault since the mm is going away.
613 mmap_write_lock(mm->mm);
614 mmap_write_unlock(mm->mm);
617 * Put for mm must be done after RCU delay to handle new refs in
618 * between the list_del_rcu() and now. This ensures any get refs
619 * during rcu_read_lock() are accounted for during list removal.
622 * ---------------------------------------------------------------
623 * user_event_mm_remove() | rcu_read_lock();
624 * list_del_rcu() | list_for_each_entry_rcu();
625 * call_rcu() | refcount_inc();
626 * . | rcu_read_unlock();
627 * schedule_work() | .
628 * user_event_mm_put() | .
630 * mmdrop() cannot be called in the softirq context of call_rcu(),
631 * so we queue a work item to run after the RCU grace period instead.
633 INIT_RCU_WORK(&mm->put_rwork, delayed_user_event_mm_put);
634 queue_rcu_work(system_wq, &mm->put_rwork);
637 void user_event_mm_dup(struct task_struct *t, struct user_event_mm *old_mm)
639 struct user_event_mm *mm = user_event_mm_create(t);
640 struct user_event_enabler *enabler;
647 list_for_each_entry_rcu(enabler, &old_mm->enablers, link)
648 if (!user_event_enabler_dup(enabler, mm))
656 user_event_mm_remove(t);
659 static struct user_event_enabler
660 *user_event_enabler_create(struct user_reg *reg, struct user_event *user,
663 struct user_event_enabler *enabler;
664 struct user_event_mm *user_mm;
665 unsigned long uaddr = (unsigned long)reg->enable_addr;
667 user_mm = current_user_event_mm();
672 enabler = kzalloc(sizeof(*enabler), GFP_KERNEL_ACCOUNT);
677 enabler->event = user;
678 enabler->addr = uaddr;
679 enabler->values = reg->enable_bit;
681 /* Prevents state changes from racing with new enablers */
682 mutex_lock(&event_mutex);
684 /* Attempt to reflect the current state within the process */
685 mmap_read_lock(user_mm->mm);
686 *write_result = user_event_enabler_write(user_mm, enabler, false);
687 mmap_read_unlock(user_mm->mm);
690 * If the write works, then we will track the enabler. A ref to the
691 * underlying user_event is held by the enabler to prevent it from going
692 * away while the enabler is still in use by a process. The ref is
693 * removed when the enabler is destroyed. This means an event cannot
694 * be forcefully deleted from the system until all tasks using it
695 * exit or run exec(), which includes forks and clones.
697 if (!*write_result) {
698 refcount_inc(&enabler->event->refcnt);
699 list_add_rcu(&enabler->link, &user_mm->enablers);
702 mutex_unlock(&event_mutex);
705 /* Attempt to fault-in and retry if it worked */
706 if (!user_event_mm_fault_in(user_mm, uaddr))
713 user_event_mm_put(user_mm);
718 static __always_inline __must_check
719 bool user_event_last_ref(struct user_event *user)
721 return refcount_read(&user->refcnt) == 1;
724 static __always_inline __must_check
725 size_t copy_nofault(void *addr, size_t bytes, struct iov_iter *i)
731 ret = copy_from_iter_nocache(addr, bytes, i);
738 static struct list_head *user_event_get_fields(struct trace_event_call *call)
740 struct user_event *user = (struct user_event *)call->data;
742 return &user->fields;
746 * Parses a register command for user_events
747 * Format: event_name[:FLAG1[,FLAG2...]] [field1[;field2...]]
749 * Example: an event named 'test' with a 20 char 'msg' field and an unsigned int 'id' field:
751 * test char[20] msg;unsigned int id
753 * NOTE: Offsets are from the user data perspective; they are not from the
754 * trace_entry/buffer perspective. We automatically add the common properties
755 * sizes to the offset for the user.
757 * Upon success user_event has its ref count increased by 1.
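/*
 * Illustrative only: the same description can also be registered through the
 * dynamic_events tracefs file by prefixing it with USER_EVENTS_PREFIX ("u:"),
 * assuming the default tracefs mount point:
 *
 *	echo 'u:test char[20] msg;unsigned int id' >> /sys/kernel/tracing/dynamic_events
 */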
759 static int user_event_parse_cmd(struct user_event_group *group,
760 char *raw_command, struct user_event **newuser)
762 char *name = raw_command;
763 char *args = strpbrk(name, " ");
769 flags = strpbrk(name, ":");
774 return user_event_parse(group, name, args, flags, newuser);
777 static int user_field_array_size(const char *type)
779 const char *start = strchr(type, '[');
787 if (strscpy(val, start + 1, sizeof(val)) <= 0)
790 bracket = strchr(val, ']');
797 if (kstrtouint(val, 0, &size))
800 if (size > MAX_FIELD_ARRAY_SIZE)
806 static int user_field_size(const char *type)
808 /* long is not allowed from a user, since it's ambiguous in size */
809 if (strcmp(type, "s64") == 0)
811 if (strcmp(type, "u64") == 0)
813 if (strcmp(type, "s32") == 0)
815 if (strcmp(type, "u32") == 0)
817 if (strcmp(type, "int") == 0)
819 if (strcmp(type, "unsigned int") == 0)
820 return sizeof(unsigned int);
821 if (strcmp(type, "s16") == 0)
823 if (strcmp(type, "u16") == 0)
825 if (strcmp(type, "short") == 0)
826 return sizeof(short);
827 if (strcmp(type, "unsigned short") == 0)
828 return sizeof(unsigned short);
829 if (strcmp(type, "s8") == 0)
831 if (strcmp(type, "u8") == 0)
833 if (strcmp(type, "char") == 0)
835 if (strcmp(type, "unsigned char") == 0)
836 return sizeof(unsigned char);
837 if (str_has_prefix(type, "char["))
838 return user_field_array_size(type);
839 if (str_has_prefix(type, "unsigned char["))
840 return user_field_array_size(type);
841 if (str_has_prefix(type, "__data_loc "))
843 if (str_has_prefix(type, "__rel_loc "))
846 /* Unknown basic type, error */
850 static void user_event_destroy_validators(struct user_event *user)
852 struct user_event_validator *validator, *next;
853 struct list_head *head = &user->validators;
855 list_for_each_entry_safe(validator, next, head, link) {
856 list_del(&validator->link);
861 static void user_event_destroy_fields(struct user_event *user)
863 struct ftrace_event_field *field, *next;
864 struct list_head *head = &user->fields;
866 list_for_each_entry_safe(field, next, head, link) {
867 list_del(&field->link);
872 static int user_event_add_field(struct user_event *user, const char *type,
873 const char *name, int offset, int size,
874 int is_signed, int filter_type)
876 struct user_event_validator *validator;
877 struct ftrace_event_field *field;
878 int validator_flags = 0;
880 field = kmalloc(sizeof(*field), GFP_KERNEL_ACCOUNT);
885 if (str_has_prefix(type, "__data_loc "))
888 if (str_has_prefix(type, "__rel_loc ")) {
889 validator_flags |= VALIDATOR_REL;
896 if (strstr(type, "char") != NULL)
897 validator_flags |= VALIDATOR_ENSURE_NULL;
899 validator = kmalloc(sizeof(*validator), GFP_KERNEL_ACCOUNT);
906 validator->flags = validator_flags;
907 validator->offset = offset;
909 /* Want sequential access when validating */
910 list_add_tail(&validator->link, &user->validators);
915 field->offset = offset;
917 field->is_signed = is_signed;
918 field->filter_type = filter_type;
920 list_add(&field->link, &user->fields);
923 * Minimum size required from user writes; this does not include
924 * the size of trace_entry (common fields).
926 user->min_size = (offset + size) - sizeof(struct trace_entry);
932 * Parses the values of a field within the description
933 * Format: type name [size]
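/*
 * Illustrative field descriptions (joined with ';' in the full command);
 * 'my_struct' and 'mydata' are hypothetical names:
 *
 *	u32 count
 *	char[20] msg
 *	unsigned int id
 *	struct my_struct mydata 24
 *
 * Struct types must carry an explicit byte size since the kernel cannot
 * infer it from the type name.
 */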
935 static int user_event_parse_field(char *field, struct user_event *user,
938 char *part, *type, *name;
939 u32 depth = 0, saved_offset = *offset;
940 int len, size = -EINVAL;
941 bool is_struct = false;
943 field = skip_spaces(field);
948 /* Handle types that have a space within */
949 len = str_has_prefix(field, "unsigned ");
953 len = str_has_prefix(field, "struct ");
959 len = str_has_prefix(field, "__data_loc unsigned ");
963 len = str_has_prefix(field, "__data_loc ");
967 len = str_has_prefix(field, "__rel_loc unsigned ");
971 len = str_has_prefix(field, "__rel_loc ");
978 field = strpbrk(field + len, " ");
988 while ((part = strsep(&field, " ")) != NULL) {
990 case FIELD_DEPTH_TYPE:
993 case FIELD_DEPTH_NAME:
996 case FIELD_DEPTH_SIZE:
1000 if (kstrtou32(part, 10, &size))
1008 if (depth < FIELD_DEPTH_SIZE || !name)
1011 if (depth == FIELD_DEPTH_SIZE)
1012 size = user_field_size(type);
1020 *offset = saved_offset + size;
1022 return user_event_add_field(user, type, name, saved_offset, size,
1023 type[0] != 'u', FILTER_OTHER);
1026 static int user_event_parse_fields(struct user_event *user, char *args)
1029 u32 offset = sizeof(struct trace_entry);
1035 while ((field = strsep(&args, ";")) != NULL) {
1036 ret = user_event_parse_field(field, user, &offset);
1045 static struct trace_event_fields user_event_fields_array[1];
1047 static const char *user_field_format(const char *type)
1049 if (strcmp(type, "s64") == 0)
1051 if (strcmp(type, "u64") == 0)
1053 if (strcmp(type, "s32") == 0)
1055 if (strcmp(type, "u32") == 0)
1057 if (strcmp(type, "int") == 0)
1059 if (strcmp(type, "unsigned int") == 0)
1061 if (strcmp(type, "s16") == 0)
1063 if (strcmp(type, "u16") == 0)
1065 if (strcmp(type, "short") == 0)
1067 if (strcmp(type, "unsigned short") == 0)
1069 if (strcmp(type, "s8") == 0)
1071 if (strcmp(type, "u8") == 0)
1073 if (strcmp(type, "char") == 0)
1075 if (strcmp(type, "unsigned char") == 0)
1077 if (strstr(type, "char[") != NULL)
1080 /* Unknown type, likely a struct; allowed, treat as 64-bit */
1084 static bool user_field_is_dyn_string(const char *type, const char **str_func)
1086 if (str_has_prefix(type, "__data_loc ")) {
1087 *str_func = "__get_str";
1091 if (str_has_prefix(type, "__rel_loc ")) {
1092 *str_func = "__get_rel_str";
1098 return strstr(type, "char") != NULL;
1101 #define LEN_OR_ZERO (len ? len - pos : 0)
1102 static int user_dyn_field_set_string(int argc, const char **argv, int *iout,
1103 char *buf, int len, bool *colon)
1105 int pos = 0, i = *iout;
1109 for (; i < argc; ++i) {
1111 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1113 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", argv[i]);
1115 if (strchr(argv[i], ';')) {
1122 /* Actual set, advance i */
1129 static int user_field_set_string(struct ftrace_event_field *field,
1130 char *buf, int len, bool colon)
1134 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->type);
1135 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1136 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s", field->name);
1139 pos += snprintf(buf + pos, LEN_OR_ZERO, ";");
1144 static int user_event_set_print_fmt(struct user_event *user, char *buf, int len)
1146 struct ftrace_event_field *field, *next;
1147 struct list_head *head = &user->fields;
1148 int pos = 0, depth = 0;
1149 const char *str_func;
1151 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1153 list_for_each_entry_safe_reverse(field, next, head, link) {
1155 pos += snprintf(buf + pos, LEN_OR_ZERO, " ");
1157 pos += snprintf(buf + pos, LEN_OR_ZERO, "%s=%s",
1158 field->name, user_field_format(field->type));
1163 pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
1165 list_for_each_entry_safe_reverse(field, next, head, link) {
1166 if (user_field_is_dyn_string(field->type, &str_func))
1167 pos += snprintf(buf + pos, LEN_OR_ZERO,
1168 ", %s(%s)", str_func, field->name);
1170 pos += snprintf(buf + pos, LEN_OR_ZERO,
1171 ", REC->%s", field->name);
1178 static int user_event_create_print_fmt(struct user_event *user)
1183 len = user_event_set_print_fmt(user, NULL, 0);
1185 print_fmt = kmalloc(len, GFP_KERNEL_ACCOUNT);
1190 user_event_set_print_fmt(user, print_fmt, len);
1192 user->call.print_fmt = print_fmt;
1197 static enum print_line_t user_event_print_trace(struct trace_iterator *iter,
1199 struct trace_event *event)
1201 /* Unsafe to try to decode user-provided print_fmt; use hex */
1202 trace_print_hex_dump_seq(&iter->seq, "", DUMP_PREFIX_OFFSET, 16,
1203 1, iter->ent, iter->ent_size, true);
1205 return trace_handle_return(&iter->seq);
1208 static struct trace_event_functions user_event_funcs = {
1209 .trace = user_event_print_trace,
1212 static int user_event_set_call_visible(struct user_event *user, bool visible)
1215 const struct cred *old_cred;
1218 cred = prepare_creds();
1224 * While by default tracefs is locked down, systems can be configured
1225 * to allow user_event files to be less locked down. In the extreme case,
1226 * "other" has read/write access to user_events_data/status.
1228 * When not locked down, processes may not have permissions to
1229 * add/remove calls to tracefs themselves. We need to temporarily
1230 * switch to root file permission to allow for this scenario.
1232 cred->fsuid = GLOBAL_ROOT_UID;
1234 old_cred = override_creds(cred);
1237 ret = trace_add_event_call(&user->call);
1239 ret = trace_remove_event_call(&user->call);
1241 revert_creds(old_cred);
1247 static int destroy_user_event(struct user_event *user)
1251 lockdep_assert_held(&event_mutex);
1253 /* Must destroy fields before call removal */
1254 user_event_destroy_fields(user);
1256 ret = user_event_set_call_visible(user, false);
1261 dyn_event_remove(&user->devent);
1262 hash_del(&user->node);
1264 user_event_destroy_validators(user);
1265 kfree(user->call.print_fmt);
1266 kfree(EVENT_NAME(user));
1269 if (current_user_events > 0)
1270 current_user_events--;
1272 pr_alert("BUG: Bad current_user_events\n");
1277 static struct user_event *find_user_event(struct user_event_group *group,
1278 char *name, u32 *outkey)
1280 struct user_event *user;
1281 u32 key = user_event_key(name);
1285 hash_for_each_possible(group->register_table, user, node, key)
1286 if (!strcmp(EVENT_NAME(user), name)) {
1287 refcount_inc(&user->refcnt);
1294 static int user_event_validate(struct user_event *user, void *data, int len)
1296 struct list_head *head = &user->validators;
1297 struct user_event_validator *validator;
1298 void *pos, *end = data + len;
1299 u32 loc, offset, size;
1301 list_for_each_entry(validator, head, link) {
1302 pos = data + validator->offset;
1304 /* Already done min_size check, no bounds check here */
1306 offset = loc & 0xffff;
1309 if (likely(validator->flags & VALIDATOR_REL))
1310 pos += offset + sizeof(loc);
1312 pos = data + offset;
1316 if (unlikely(pos > end))
1319 if (likely(validator->flags & VALIDATOR_ENSURE_NULL))
1320 if (unlikely(*(char *)(pos - 1) != '\0'))
1328 * Writes the user supplied payload out to a trace file.
1330 static void user_event_ftrace(struct user_event *user, struct iov_iter *i,
1331 void *tpdata, bool *faulted)
1333 struct trace_event_file *file;
1334 struct trace_entry *entry;
1335 struct trace_event_buffer event_buffer;
1336 size_t size = sizeof(*entry) + i->count;
1338 file = (struct trace_event_file *)tpdata;
1341 !(file->flags & EVENT_FILE_FL_ENABLED) ||
1342 trace_trigger_soft_disabled(file))
1345 /* Allocates and fills trace_entry; entry + 1 is the data payload */
1346 entry = trace_event_buffer_reserve(&event_buffer, file, size);
1348 if (unlikely(!entry))
1351 if (unlikely(!copy_nofault(entry + 1, i->count, i)))
1354 if (!list_empty(&user->validators) &&
1355 unlikely(user_event_validate(user, entry, size)))
1358 trace_event_buffer_commit(&event_buffer);
1363 __trace_event_discard_commit(event_buffer.buffer,
1364 event_buffer.event);
1367 #ifdef CONFIG_PERF_EVENTS
1369 * Writes the user supplied payload out to perf ring buffer.
1371 static void user_event_perf(struct user_event *user, struct iov_iter *i,
1372 void *tpdata, bool *faulted)
1374 struct hlist_head *perf_head;
1376 perf_head = this_cpu_ptr(user->call.perf_events);
1378 if (perf_head && !hlist_empty(perf_head)) {
1379 struct trace_entry *perf_entry;
1380 struct pt_regs *regs;
1381 size_t size = sizeof(*perf_entry) + i->count;
1384 perf_entry = perf_trace_buf_alloc(ALIGN(size, 8),
1387 if (unlikely(!perf_entry))
1390 perf_fetch_caller_regs(regs);
1392 if (unlikely(!copy_nofault(perf_entry + 1, i->count, i)))
1395 if (!list_empty(&user->validators) &&
1396 unlikely(user_event_validate(user, perf_entry, size)))
1399 perf_trace_buf_submit(perf_entry, size, context,
1400 user->call.event.type, 1, regs,
1406 perf_swevent_put_recursion_context(context);
1412 * Update the enabled bit among all user processes.
1414 static void update_enable_bit_for(struct user_event *user)
1416 struct tracepoint *tp = &user->tracepoint;
1419 if (atomic_read(&tp->key.enabled) > 0) {
1420 struct tracepoint_func *probe_func_ptr;
1421 user_event_func_t probe_func;
1423 rcu_read_lock_sched();
1425 probe_func_ptr = rcu_dereference_sched(tp->funcs);
1427 if (probe_func_ptr) {
1429 probe_func = probe_func_ptr->func;
1431 if (probe_func == user_event_ftrace)
1432 status |= EVENT_STATUS_FTRACE;
1433 #ifdef CONFIG_PERF_EVENTS
1434 else if (probe_func == user_event_perf)
1435 status |= EVENT_STATUS_PERF;
1438 status |= EVENT_STATUS_OTHER;
1439 } while ((++probe_func_ptr)->func);
1442 rcu_read_unlock_sched();
1445 user->status = status;
1447 user_event_enabler_update(user);
1451 * Register callback for our events from tracing sub-systems.
1453 static int user_event_reg(struct trace_event_call *call,
1454 enum trace_reg type,
1457 struct user_event *user = (struct user_event *)call->data;
1464 case TRACE_REG_REGISTER:
1465 ret = tracepoint_probe_register(call->tp,
1472 case TRACE_REG_UNREGISTER:
1473 tracepoint_probe_unregister(call->tp,
1478 #ifdef CONFIG_PERF_EVENTS
1479 case TRACE_REG_PERF_REGISTER:
1480 ret = tracepoint_probe_register(call->tp,
1481 call->class->perf_probe,
1487 case TRACE_REG_PERF_UNREGISTER:
1488 tracepoint_probe_unregister(call->tp,
1489 call->class->perf_probe,
1493 case TRACE_REG_PERF_OPEN:
1494 case TRACE_REG_PERF_CLOSE:
1495 case TRACE_REG_PERF_ADD:
1496 case TRACE_REG_PERF_DEL:
1503 refcount_inc(&user->refcnt);
1504 update_enable_bit_for(user);
1507 update_enable_bit_for(user);
1508 refcount_dec(&user->refcnt);
1512 static int user_event_create(const char *raw_command)
1514 struct user_event_group *group;
1515 struct user_event *user;
1519 if (!str_has_prefix(raw_command, USER_EVENTS_PREFIX))
1522 raw_command += USER_EVENTS_PREFIX_LEN;
1523 raw_command = skip_spaces(raw_command);
1525 name = kstrdup(raw_command, GFP_KERNEL_ACCOUNT);
1530 group = current_user_event_group();
1537 mutex_lock(&group->reg_mutex);
1539 ret = user_event_parse_cmd(group, name, &user);
1542 refcount_dec(&user->refcnt);
1544 mutex_unlock(&group->reg_mutex);
1552 static int user_event_show(struct seq_file *m, struct dyn_event *ev)
1554 struct user_event *user = container_of(ev, struct user_event, devent);
1555 struct ftrace_event_field *field, *next;
1556 struct list_head *head;
1559 seq_printf(m, "%s%s", USER_EVENTS_PREFIX, EVENT_NAME(user));
1561 head = trace_get_fields(&user->call);
1563 list_for_each_entry_safe_reverse(field, next, head, link) {
1569 seq_printf(m, "%s %s", field->type, field->name);
1571 if (str_has_prefix(field->type, "struct "))
1572 seq_printf(m, " %d", field->size);
1582 static bool user_event_is_busy(struct dyn_event *ev)
1584 struct user_event *user = container_of(ev, struct user_event, devent);
1586 return !user_event_last_ref(user);
1589 static int user_event_free(struct dyn_event *ev)
1591 struct user_event *user = container_of(ev, struct user_event, devent);
1593 if (!user_event_last_ref(user))
1596 return destroy_user_event(user);
1599 static bool user_field_match(struct ftrace_event_field *field, int argc,
1600 const char **argv, int *iout)
1602 char *field_name = NULL, *dyn_field_name = NULL;
1603 bool colon = false, match = false;
1609 dyn_len = user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1612 len = user_field_set_string(field, field_name, 0, colon);
1617 dyn_field_name = kmalloc(dyn_len, GFP_KERNEL);
1618 field_name = kmalloc(len, GFP_KERNEL);
1620 if (!dyn_field_name || !field_name)
1623 user_dyn_field_set_string(argc, argv, iout, dyn_field_name,
1626 user_field_set_string(field, field_name, len, colon);
1628 match = strcmp(dyn_field_name, field_name) == 0;
1630 kfree(dyn_field_name);
1636 static bool user_fields_match(struct user_event *user, int argc,
1639 struct ftrace_event_field *field, *next;
1640 struct list_head *head = &user->fields;
1643 list_for_each_entry_safe_reverse(field, next, head, link)
1644 if (!user_field_match(field, argc, argv, &i))
1653 static bool user_event_match(const char *system, const char *event,
1654 int argc, const char **argv, struct dyn_event *ev)
1656 struct user_event *user = container_of(ev, struct user_event, devent);
1659 match = strcmp(EVENT_NAME(user), event) == 0 &&
1660 (!system || strcmp(system, USER_EVENTS_SYSTEM) == 0);
1662 if (match && argc > 0)
1663 match = user_fields_match(user, argc, argv);
1668 static struct dyn_event_operations user_event_dops = {
1669 .create = user_event_create,
1670 .show = user_event_show,
1671 .is_busy = user_event_is_busy,
1672 .free = user_event_free,
1673 .match = user_event_match,
1676 static int user_event_trace_register(struct user_event *user)
1680 ret = register_trace_event(&user->call.event);
1685 ret = user_event_set_call_visible(user, true);
1688 unregister_trace_event(&user->call.event);
1694 * Parses the event name, arguments and flags then registers if successful.
1695 * The name buffer lifetime is owned by this method for success cases only.
1696 * Upon success the returned user_event has its ref count increased by 1.
1698 static int user_event_parse(struct user_event_group *group, char *name,
1699 char *args, char *flags,
1700 struct user_event **newuser)
1704 struct user_event *user;
1706 /* Prevent dyn_event from racing */
1707 mutex_lock(&event_mutex);
1708 user = find_user_event(group, name, &key);
1709 mutex_unlock(&event_mutex);
1714 * The name is allocated by the caller; free it here since the event
1715 * already exists. The caller only worries about freeing on failure.
1721 user = kzalloc(sizeof(*user), GFP_KERNEL_ACCOUNT);
1726 INIT_LIST_HEAD(&user->class.fields);
1727 INIT_LIST_HEAD(&user->fields);
1728 INIT_LIST_HEAD(&user->validators);
1730 user->group = group;
1731 user->tracepoint.name = name;
1733 ret = user_event_parse_fields(user, args);
1738 ret = user_event_create_print_fmt(user);
1743 user->call.data = user;
1744 user->call.class = &user->class;
1745 user->call.name = name;
1746 user->call.flags = TRACE_EVENT_FL_TRACEPOINT;
1747 user->call.tp = &user->tracepoint;
1748 user->call.event.funcs = &user_event_funcs;
1749 user->class.system = group->system_name;
1751 user->class.fields_array = user_event_fields_array;
1752 user->class.get_fields = user_event_get_fields;
1753 user->class.reg = user_event_reg;
1754 user->class.probe = user_event_ftrace;
1755 #ifdef CONFIG_PERF_EVENTS
1756 user->class.perf_probe = user_event_perf;
1759 mutex_lock(&event_mutex);
1761 if (current_user_events >= max_user_events) {
1766 ret = user_event_trace_register(user);
1771 /* Ensure we track self ref and caller ref (2) */
1772 refcount_set(&user->refcnt, 2);
1774 dyn_event_init(&user->devent, &user_event_dops);
1775 dyn_event_add(&user->devent, &user->call);
1776 hash_add(group->register_table, &user->node, key);
1777 current_user_events++;
1779 mutex_unlock(&event_mutex);
1784 mutex_unlock(&event_mutex);
1786 user_event_destroy_fields(user);
1787 user_event_destroy_validators(user);
1788 kfree(user->call.print_fmt);
1794 * Deletes a previously created event if it is no longer being used.
1796 static int delete_user_event(struct user_event_group *group, char *name)
1799 struct user_event *user = find_user_event(group, name, &key);
1804 refcount_dec(&user->refcnt);
1806 if (!user_event_last_ref(user))
1809 return destroy_user_event(user);
1813 * Validates the user payload and writes via iterator.
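/*
 * Illustrative only: from user space each write begins with the u32
 * write_index returned by DIAG_IOCSREG (see the registration sketch further
 * below), followed by the payload matching the registered fields. A minimal
 * sketch, assuming an event with a single 'u32 count' field:
 *
 *	struct iovec io[2];
 *	__u32 count = 1;
 *
 *	io[0].iov_base = &write_index;
 *	io[0].iov_len = sizeof(write_index);
 *	io[1].iov_base = &count;
 *	io[1].iov_len = sizeof(count);
 *
 *	writev(data_fd, io, 2);
 */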
1815 static ssize_t user_events_write_core(struct file *file, struct iov_iter *i)
1817 struct user_event_file_info *info = file->private_data;
1818 struct user_event_refs *refs;
1819 struct user_event *user = NULL;
1820 struct tracepoint *tp;
1821 ssize_t ret = i->count;
1824 if (unlikely(copy_from_iter(&idx, sizeof(idx), i) != sizeof(idx)))
1827 rcu_read_lock_sched();
1829 refs = rcu_dereference_sched(info->refs);
1832 * The refs->events array is protected by RCU, and new items may be
1833 * added. But the user retrieved from indexing into the events array
1834 * shall be immutable while the file is opened.
1836 if (likely(refs && idx < refs->count))
1837 user = refs->events[idx];
1839 rcu_read_unlock_sched();
1841 if (unlikely(user == NULL))
1844 if (unlikely(i->count < user->min_size))
1847 tp = &user->tracepoint;
1850 * It's possible key.enabled gets disabled after this check; however,
1851 * we don't mind if a few events are included in this condition.
1853 if (likely(atomic_read(&tp->key.enabled) > 0)) {
1854 struct tracepoint_func *probe_func_ptr;
1855 user_event_func_t probe_func;
1856 struct iov_iter copy;
1860 if (unlikely(fault_in_iov_iter_readable(i, i->count)))
1865 rcu_read_lock_sched();
1867 probe_func_ptr = rcu_dereference_sched(tp->funcs);
1869 if (probe_func_ptr) {
1872 probe_func = probe_func_ptr->func;
1873 tpdata = probe_func_ptr->data;
1874 probe_func(user, ©, tpdata, &faulted);
1875 } while ((++probe_func_ptr)->func);
1878 rcu_read_unlock_sched();
1880 if (unlikely(faulted))
1887 static int user_events_open(struct inode *node, struct file *file)
1889 struct user_event_group *group;
1890 struct user_event_file_info *info;
1892 group = current_user_event_group();
1897 info = kzalloc(sizeof(*info), GFP_KERNEL_ACCOUNT);
1902 info->group = group;
1904 file->private_data = info;
1909 static ssize_t user_events_write(struct file *file, const char __user *ubuf,
1910 size_t count, loff_t *ppos)
1915 if (unlikely(*ppos != 0))
1918 if (unlikely(import_single_range(ITER_SOURCE, (char __user *)ubuf,
1922 return user_events_write_core(file, &i);
1925 static ssize_t user_events_write_iter(struct kiocb *kp, struct iov_iter *i)
1927 return user_events_write_core(kp->ki_filp, i);
1930 static int user_events_ref_add(struct user_event_file_info *info,
1931 struct user_event *user)
1933 struct user_event_group *group = info->group;
1934 struct user_event_refs *refs, *new_refs;
1935 int i, size, count = 0;
1937 refs = rcu_dereference_protected(info->refs,
1938 lockdep_is_held(&group->reg_mutex));
1941 count = refs->count;
1943 for (i = 0; i < count; ++i)
1944 if (refs->events[i] == user)
1948 size = struct_size(refs, events, count + 1);
1950 new_refs = kzalloc(size, GFP_KERNEL_ACCOUNT);
1955 new_refs->count = count + 1;
1957 for (i = 0; i < count; ++i)
1958 new_refs->events[i] = refs->events[i];
1960 new_refs->events[i] = user;
1962 refcount_inc(&user->refcnt);
1964 rcu_assign_pointer(info->refs, new_refs);
1967 kfree_rcu(refs, rcu);
1972 static long user_reg_get(struct user_reg __user *ureg, struct user_reg *kreg)
1977 ret = get_user(size, &ureg->size);
1982 if (size > PAGE_SIZE)
1985 if (size < offsetofend(struct user_reg, write_index))
1988 ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
1993 /* Ensure no flags, since we don't support any yet */
1994 if (kreg->flags != 0)
1997 /* Ensure supported size */
1998 switch (kreg->enable_size) {
2002 #if BITS_PER_LONG >= 64
2011 /* Ensure natural alignment */
2012 if (kreg->enable_addr % kreg->enable_size)
2015 /* Ensure bit range for size */
2016 if (kreg->enable_bit > (kreg->enable_size * BITS_PER_BYTE) - 1)
2019 /* Ensure accessible */
2020 if (!access_ok((const void __user *)(uintptr_t)kreg->enable_addr,
2030 * Registers a user_event on behalf of a user process.
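/*
 * A minimal user-space registration sketch, assuming the struct user_reg
 * layout and DIAG_IOCSREG ioctl from the uapi header <linux/user_events.h>
 * and the default tracefs mount point:
 *
 *	int enabled = 0;
 *	struct user_reg reg = {0};
 *	int data_fd = open("/sys/kernel/tracing/user_events_data", O_RDWR);
 *
 *	reg.size = sizeof(reg);
 *	reg.enable_bit = 31;
 *	reg.enable_size = sizeof(enabled);
 *	reg.enable_addr = (__u64)(uintptr_t)&enabled;
 *	reg.name_args = (__u64)(uintptr_t)"test u32 count";
 *
 *	ioctl(data_fd, DIAG_IOCSREG, &reg);
 *
 * On success reg.write_index holds the index to prefix writes with, and the
 * kernel sets/clears bit 31 of 'enabled' as tracers attach/detach.
 */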
2032 static long user_events_ioctl_reg(struct user_event_file_info *info,
2035 struct user_reg __user *ureg = (struct user_reg __user *)uarg;
2036 struct user_reg reg;
2037 struct user_event *user;
2038 struct user_event_enabler *enabler;
2043 ret = user_reg_get(ureg, ®);
2048 name = strndup_user((const char __user *)(uintptr_t)reg.name_args,
2052 ret = PTR_ERR(name);
2056 ret = user_event_parse_cmd(info->group, name, &user);
2063 ret = user_events_ref_add(info, user);
2065 /* No longer need parse ref, ref_add either worked or not */
2066 refcount_dec(&user->refcnt);
2068 /* Positive number is index and valid */
2073 * user_events_ref_add succeeded:
2074 * At this point we have a user_event; its lifetime is bound by the
2075 * reference count, not this file. If anything fails, the user_event
2076 * still has a reference until the file is released. During release
2077 * any remaining references (from user_events_ref_add) are decremented.
2079 * Attempt to create an enabler, whose lifetime is tied to the event in
2080 * the same way. Once the task that caused the enabler to be
2081 * created exits or issues exec() then the enablers it has created
2082 * will be destroyed and the ref to the event will be decremented.
2084 enabler = user_event_enabler_create(®, user, &write_result);
2089 /* Write failed/faulted, give error back to caller */
2091 return write_result;
2093 put_user((u32)ret, &ureg->write_index);
2099 * Deletes a user_event on behalf of a user process.
2101 static long user_events_ioctl_del(struct user_event_file_info *info,
2104 void __user *ubuf = (void __user *)uarg;
2108 name = strndup_user(ubuf, MAX_EVENT_DESC);
2111 return PTR_ERR(name);
2113 /* event_mutex prevents dyn_event from racing */
2114 mutex_lock(&event_mutex);
2115 ret = delete_user_event(info->group, name);
2116 mutex_unlock(&event_mutex);
2123 static long user_unreg_get(struct user_unreg __user *ureg,
2124 struct user_unreg *kreg)
2129 ret = get_user(size, &ureg->size);
2134 if (size > PAGE_SIZE)
2137 if (size < offsetofend(struct user_unreg, disable_addr))
2140 ret = copy_struct_from_user(kreg, sizeof(*kreg), ureg, size);
2142 /* Ensure no reserved values, since we don't support any yet */
2143 if (kreg->__reserved || kreg->__reserved2)
2150 * Unregisters an enablement address/bit within a task/user mm.
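/*
 * A minimal user-space sketch, assuming the struct user_unreg layout and
 * DIAG_IOCSUNREG ioctl from the uapi header <linux/user_events.h>; 'enabled'
 * and 'data_fd' are from the registration sketch above:
 *
 *	struct user_unreg unreg = {0};
 *
 *	unreg.size = sizeof(unreg);
 *	unreg.disable_bit = 31;
 *	unreg.disable_addr = (__u64)(uintptr_t)&enabled;
 *
 *	ioctl(data_fd, DIAG_IOCSUNREG, &unreg);
 */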
2152 static long user_events_ioctl_unreg(unsigned long uarg)
2154 struct user_unreg __user *ureg = (struct user_unreg __user *)uarg;
2155 struct user_event_mm *mm = current->user_event_mm;
2156 struct user_event_enabler *enabler, *next;
2157 struct user_unreg reg;
2160 ret = user_unreg_get(ureg, ®);
2171 * Flags freeing and faulting are used to indicate if the enabler is in
2172 * use at all. When faulting is set, a page fault is occurring asynchronously.
2173 * During the async fault, if freeing is set, the enabler will be destroyed.
2174 * If no async fault is happening, we can destroy it now since we hold
2175 * the event_mutex during these checks.
2177 mutex_lock(&event_mutex);
2179 list_for_each_entry_safe(enabler, next, &mm->enablers, link)
2180 if (enabler->addr == reg.disable_addr &&
2181 (enabler->values & ENABLE_VAL_BIT_MASK) == reg.disable_bit) {
2182 set_bit(ENABLE_VAL_FREEING_BIT, ENABLE_BITOPS(enabler));
2184 if (!test_bit(ENABLE_VAL_FAULTING_BIT, ENABLE_BITOPS(enabler)))
2185 user_event_enabler_destroy(enabler);
2187 /* Removed at least one */
2191 mutex_unlock(&event_mutex);
2197 * Handles the ioctl from user mode to register or alter operations.
2199 static long user_events_ioctl(struct file *file, unsigned int cmd,
2202 struct user_event_file_info *info = file->private_data;
2203 struct user_event_group *group = info->group;
2208 mutex_lock(&group->reg_mutex);
2209 ret = user_events_ioctl_reg(info, uarg);
2210 mutex_unlock(&group->reg_mutex);
2214 mutex_lock(&group->reg_mutex);
2215 ret = user_events_ioctl_del(info, uarg);
2216 mutex_unlock(&group->reg_mutex);
2219 case DIAG_IOCSUNREG:
2220 mutex_lock(&group->reg_mutex);
2221 ret = user_events_ioctl_unreg(uarg);
2222 mutex_unlock(&group->reg_mutex);
2230 * Handles the final close of the file from user mode.
2232 static int user_events_release(struct inode *node, struct file *file)
2234 struct user_event_file_info *info = file->private_data;
2235 struct user_event_group *group;
2236 struct user_event_refs *refs;
2237 struct user_event *user;
2243 group = info->group;
2246 * Ensure refs cannot change under any situation by taking the
2247 * register mutex during the final freeing of the references.
2249 mutex_lock(&group->reg_mutex);
2257 * The lifetime of refs has reached an end; it's tied to this file.
2258 * The underlying user_events are ref counted, and cannot be freed.
2259 * After this decrement, the user_events may be freed elsewhere.
2261 for (i = 0; i < refs->count; ++i) {
2262 user = refs->events[i];
2265 refcount_dec(&user->refcnt);
2268 file->private_data = NULL;
2270 mutex_unlock(&group->reg_mutex);
2278 static const struct file_operations user_data_fops = {
2279 .open = user_events_open,
2280 .write = user_events_write,
2281 .write_iter = user_events_write_iter,
2282 .unlocked_ioctl = user_events_ioctl,
2283 .release = user_events_release,
2286 static void *user_seq_start(struct seq_file *m, loff_t *pos)
2294 static void *user_seq_next(struct seq_file *m, void *p, loff_t *pos)
2300 static void user_seq_stop(struct seq_file *m, void *p)
2304 static int user_seq_show(struct seq_file *m, void *p)
2306 struct user_event_group *group = m->private;
2307 struct user_event *user;
2309 int i, active = 0, busy = 0;
2314 mutex_lock(&group->reg_mutex);
2316 hash_for_each(group->register_table, i, user, node) {
2317 status = user->status;
2319 seq_printf(m, "%s", EVENT_NAME(user));
2325 seq_puts(m, " Used by");
2326 if (status & EVENT_STATUS_FTRACE)
2327 seq_puts(m, " ftrace");
2328 if (status & EVENT_STATUS_PERF)
2329 seq_puts(m, " perf");
2330 if (status & EVENT_STATUS_OTHER)
2331 seq_puts(m, " other");
2339 mutex_unlock(&group->reg_mutex);
2342 seq_printf(m, "Active: %d\n", active);
2343 seq_printf(m, "Busy: %d\n", busy);
2348 static const struct seq_operations user_seq_ops = {
2349 .start = user_seq_start,
2350 .next = user_seq_next,
2351 .stop = user_seq_stop,
2352 .show = user_seq_show,
2355 static int user_status_open(struct inode *node, struct file *file)
2357 struct user_event_group *group;
2360 group = current_user_event_group();
2365 ret = seq_open(file, &user_seq_ops);
2368 /* Chain group to seq_file */
2369 struct seq_file *m = file->private_data;
2377 static const struct file_operations user_status_fops = {
2378 .open = user_status_open,
2380 .llseek = seq_lseek,
2381 .release = seq_release,
2385 * Creates a set of tracefs files to allow user mode interactions.
2387 static int create_user_tracefs(void)
2389 struct dentry *edata, *emmap;
2391 edata = tracefs_create_file("user_events_data", TRACE_MODE_WRITE,
2392 NULL, NULL, &user_data_fops);
2395 pr_warn("Could not create tracefs 'user_events_data' entry\n");
2399 emmap = tracefs_create_file("user_events_status", TRACE_MODE_READ,
2400 NULL, NULL, &user_status_fops);
2403 tracefs_remove(edata);
2404 pr_warn("Could not create tracefs 'user_events_status' entry\n");
2413 static int set_max_user_events_sysctl(struct ctl_table *table, int write,
2414 void *buffer, size_t *lenp, loff_t *ppos)
2418 mutex_lock(&event_mutex);
2420 ret = proc_douintvec(table, write, buffer, lenp, ppos);
2422 mutex_unlock(&event_mutex);
2427 static struct ctl_table user_event_sysctls[] = {
2429 .procname = "user_events_max",
2430 .data = &max_user_events,
2431 .maxlen = sizeof(unsigned int),
2433 .proc_handler = set_max_user_events_sysctl,
2438 static int __init trace_events_user_init(void)
2442 fault_cache = KMEM_CACHE(user_event_enabler_fault, 0);
2447 init_group = user_event_group_create(&init_user_ns);
2450 kmem_cache_destroy(fault_cache);
2454 ret = create_user_tracefs();
2457 pr_warn("user_events could not register with tracefs\n");
2458 user_event_group_destroy(init_group);
2459 kmem_cache_destroy(fault_cache);
2464 if (dyn_event_register(&user_event_dops))
2465 pr_warn("user_events could not register with dyn_events\n");
2467 register_sysctl_init("kernel", user_event_sysctls);
2472 fs_initcall(trace_events_user_init);