// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate which lock
 * in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
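/*
 * Illustrative sketch (not part of the driver): code that needs both the
 * node lock and the owning proc's inner lock must take them in the order
 * listed above, e.g.:
 *
 *	binder_node_lock(node);
 *	binder_inner_proc_lock(node->proc);
 *	...
 *	binder_inner_proc_unlock(node->proc);
 *	binder_node_unlock(node);
 *
 * binder_node_inner_lock()/binder_node_inner_unlock() below wrap exactly
 * this pairing (including the NULL node->proc case).
 */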
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <linux/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
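/*
 * For reference (a sketch; paths assume the usual "binder" module name):
 * the knobs above are exposed through sysfs, e.g.
 *
 *	echo 0x3f > /sys/module/binder/parameters/debug_mask
 *	echo 1 > /sys/module/binder/parameters/stop_on_user_error
 */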
static __printf(2, 3) void binder_debug(int mask, const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & mask) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}
}

#define binder_txn_error(x...) \
	binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x)
static __printf(1, 2) void binder_user_error(const char *format, ...)
{
	struct va_format vaf;
	va_list args;

	if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) {
		va_start(args, format);
		vaf.va = &args;
		vaf.fmt = format;
		pr_info_ratelimited("%pV", &vaf);
		va_end(args);
	}

	if (binder_stop_on_user_error)
		binder_stop_on_user_error = 2;
}
#define binder_set_extended_error(ee, _id, _command, _param) \
	do { \
		(ee)->id = _id; \
		(ee)->command = _command; \
		(ee)->param = _param; \
	} while (0)
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
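/*
 * Illustrative use of the container_of() wrappers above (a sketch only;
 * the real type validation lives in binder_get_object() later in this
 * file):
 *
 *	struct binder_object_header *hdr = &object.hdr;
 *
 *	switch (hdr->type) {
 *	case BINDER_TYPE_BINDER:
 *		fp = to_flat_binder_object(hdr);
 *		break;
 *	...
 *	}
 */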
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log binder_transaction_log;
struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH        = 0x01,
	BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
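/*
 * Rough sketch of how the looper flags are driven from user space
 * (dispatched by binder_thread_write() later in this file):
 *
 *	BC_REGISTER_LOOPER -> thread->looper |= BINDER_LOOPER_STATE_REGISTERED
 *	BC_ENTER_LOOPER    -> thread->looper |= BINDER_LOOPER_STATE_ENTERED
 *	BC_EXIT_LOOPER     -> thread->looper |= BINDER_LOOPER_STATE_EXITED
 */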
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the work.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the work.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
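/*
 * Typical caller pattern (a sketch, not lifted verbatim from this file):
 * queue the work under the inner lock, then wake the chosen thread so it
 * notices process_todo:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_thread_work_ilocked(thread, &t->work);
 *	wake_up_interruptible_sync(&thread->wait);
 *	binder_inner_proc_unlock(proc);
 */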
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
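/*
 * Lookup pattern (sketch): a node returned by binder_get_node() carries a
 * temporary reference that the caller must drop with binder_put_node()
 * (defined later in this file) once it is done:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		...	use node safely	...
 *		binder_put_node(node);
 *	}
 */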
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
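/*
 * Sketch of how these helpers map to the user-space refcount commands
 * (dispatched from binder_thread_write() later in this file); the desc
 * and rdata arguments are illustrative:
 *
 *	BC_ACQUIRE -> binder_update_ref_for_handle(proc, desc, true, true, &rdata)
 *	BC_RELEASE -> binder_update_ref_for_handle(proc, desc, false, true, &rdata)
 *	BC_INCREFS -> binder_update_ref_for_handle(proc, desc, true, false, &rdata)
 *	BC_DECREFS -> binder_update_ref_for_handle(proc, desc, false, false, &rdata)
 */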
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);

	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
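/*
 * Usage sketch for the tmp_ref helpers above: the reply path looks
 * roughly like
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...	target_thread cannot be freed here	...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 *
 * which is the pattern binder_send_failed_reply() below follows.
 */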
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction for t->from
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @b at offset @fixup_offset
 *			is allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:                callback_head for task work
 * @file:                 file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};
/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	twcb->file = close_fd_get_file(fd);
	if (twcb->file) {
		// pin it until binder_do_fd_close(); see comments there
		get_file(twcb->file);
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure && failed_at ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
2078 static int binder_translate_binder(struct flat_binder_object *fp,
2079 struct binder_transaction *t,
2080 struct binder_thread *thread)
2082 struct binder_node *node;
2083 struct binder_proc *proc = thread->proc;
2084 struct binder_proc *target_proc = t->to_proc;
2085 struct binder_ref_data rdata;
2088 node = binder_get_node(proc, fp->binder);
2090 node = binder_new_node(proc, fp);
2094 if (fp->cookie != node->cookie) {
2095 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2096 proc->pid, thread->pid, (u64)fp->binder,
2097 node->debug_id, (u64)fp->cookie,
2102 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2107 ret = binder_inc_ref_for_node(target_proc, node,
2108 fp->hdr.type == BINDER_TYPE_BINDER,
2109 &thread->todo, &rdata);
2113 if (fp->hdr.type == BINDER_TYPE_BINDER)
2114 fp->hdr.type = BINDER_TYPE_HANDLE;
2116 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2118 fp->handle = rdata.desc;
2121 trace_binder_transaction_node_to_ref(t, node, &rdata);
2122 binder_debug(BINDER_DEBUG_TRANSACTION,
2123 " node %d u%016llx -> ref %d desc %d\n",
2124 node->debug_id, (u64)node->ptr,
2125 rdata.debug_id, rdata.desc);
2127 binder_put_node(node);
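/*
 * Illustrative sketch (not driver code, hypothetical values): when
 * process A passes one of its own objects to process B, the
 * flat_binder_object is rewritten in place:
 *
 *   before: { .hdr.type = BINDER_TYPE_BINDER, .binder = 0xb000, ... }
 *   after:  { .hdr.type = BINDER_TYPE_HANDLE, .handle = rdata.desc }
 *
 * A keeps the node; B receives only a descriptor into its own ref
 * table. binder_translate_handle() below performs the reverse mapping
 * when a handle travels back toward the process that owns the node.
 */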
2131 static int binder_translate_handle(struct flat_binder_object *fp,
2132 struct binder_transaction *t,
2133 struct binder_thread *thread)
2135 struct binder_proc *proc = thread->proc;
2136 struct binder_proc *target_proc = t->to_proc;
2137 struct binder_node *node;
2138 struct binder_ref_data src_rdata;
2141 node = binder_get_node_from_ref(proc, fp->handle,
2142 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2144 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2145 proc->pid, thread->pid, fp->handle);
2148 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2153 binder_node_lock(node);
2154 if (node->proc == target_proc) {
2155 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2156 fp->hdr.type = BINDER_TYPE_BINDER;
2158 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2159 fp->binder = node->ptr;
2160 fp->cookie = node->cookie;
2162 binder_inner_proc_lock(node->proc);
2164 __acquire(&node->proc->inner_lock);
2165 binder_inc_node_nilocked(node,
2166 fp->hdr.type == BINDER_TYPE_BINDER,
2169 binder_inner_proc_unlock(node->proc);
2171 __release(&node->proc->inner_lock);
2172 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2173 binder_debug(BINDER_DEBUG_TRANSACTION,
2174 " ref %d desc %d -> node %d u%016llx\n",
2175 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2177 binder_node_unlock(node);
2179 struct binder_ref_data dest_rdata;
2181 binder_node_unlock(node);
2182 ret = binder_inc_ref_for_node(target_proc, node,
2183 fp->hdr.type == BINDER_TYPE_HANDLE,
2189 fp->handle = dest_rdata.desc;
2191 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2193 binder_debug(BINDER_DEBUG_TRANSACTION,
2194 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2195 src_rdata.debug_id, src_rdata.desc,
2196 dest_rdata.debug_id, dest_rdata.desc,
2200 binder_put_node(node);
2204 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2205 struct binder_transaction *t,
2206 struct binder_thread *thread,
2207 struct binder_transaction *in_reply_to)
2209 struct binder_proc *proc = thread->proc;
2210 struct binder_proc *target_proc = t->to_proc;
2211 struct binder_txn_fd_fixup *fixup;
2214 bool target_allows_fd;
2217 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2219 target_allows_fd = t->buffer->target_node->accept_fds;
2220 if (!target_allows_fd) {
2221 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2222 proc->pid, thread->pid,
2223 in_reply_to ? "reply" : "transaction",
2226 goto err_fd_not_accepted;
2231 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2232 proc->pid, thread->pid, fd);
2236 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2243 * Add fixup record for this transaction. The allocation
2244 * of the fd in the target needs to be done from a target thread.
2247 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2253 fixup->offset = fd_offset;
2254 fixup->target_fd = -1;
2255 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2256 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2264 err_fd_not_accepted:
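/*
 * Sketch of the fixup record queued above (values hypothetical):
 *
 *   fixup->offset    = fd_offset;   <- where the target fd is written
 *   fixup->target_fd = -1;          <- allocated later, in target context
 *
 * The descriptor cannot be allocated here because this code runs in the
 * sender's context; binder_apply_fd_fixups() allocates and installs the
 * fd once a target thread picks up the transaction.
 */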
2269 * struct binder_ptr_fixup - data to be fixed-up in target buffer
2270 * @offset: offset in target buffer to fixup
2271 * @skip_size: bytes to skip in copy (fixup will be written later)
2272 * @fixup_data: data to write at fixup offset
2275 * This is used for the pointer fixup list (pf) which is created and consumed
2276 * during binder_transaction() and is only accessed locally. No
2277 * locking is necessary.
2279 * The list is ordered by @offset.
2281 struct binder_ptr_fixup {
2282 binder_size_t offset;
2284 binder_uintptr_t fixup_data;
2285 struct list_head node;
2289 * struct binder_sg_copy - scatter-gather data to be copied
2290 * @offset: offset in target buffer
2291 * @sender_uaddr: user address in source buffer
2292 * @length: bytes to copy
2295 * This is used for the sg copy list (sgc) which is created and consumed
2296 * during binder_transaction() and is only accessed locally. No
2297 * locking is necessary.
2299 * The list is ordered by @offset.
2301 struct binder_sg_copy {
2302 binder_size_t offset;
2303 const void __user *sender_uaddr;
2305 struct list_head node;
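/*
 * Example of how the two lists cooperate (all values hypothetical): a
 * single sg buffer of 0x80 bytes with one embedded pointer at target
 * offset 0x40 would be described as
 *
 *   sgc: { .offset = 0x00, .sender_uaddr = ubuf, .length = 0x80 }
 *   pf:  { .offset = 0x40, .fixup_data = translated_ptr, .skip_size = 0 }
 *
 * binder_do_deferred_txn_copies() below copies [0x00, 0x40) from ubuf,
 * writes fixup_data at 0x40 in place of the sender's raw pointer, and
 * resumes copying at 0x48 (after sizeof(fixup_data) bytes).
 */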
2309 * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data
2310 * @alloc: binder_alloc associated with @buffer
2311 * @buffer: binder buffer in target process
2312 * @sgc_head: list_head of scatter-gather copy list
2313 * @pf_head: list_head of pointer fixup list
2315 * Processes all elements of @sgc_head, applying fixups from @pf_head
2316 * and copying the scatter-gather data from the source process' user
2317 * buffer to the target's buffer. It is expected that the list creation
2318 * and processing all occur during binder_transaction(), so these lists
2319 * are only accessed in local context.
2321 * Return: 0=success, else -errno
2323 static int binder_do_deferred_txn_copies(struct binder_alloc *alloc,
2324 struct binder_buffer *buffer,
2325 struct list_head *sgc_head,
2326 struct list_head *pf_head)
2329 struct binder_sg_copy *sgc, *tmpsgc;
2330 struct binder_ptr_fixup *tmppf;
2331 struct binder_ptr_fixup *pf =
2332 list_first_entry_or_null(pf_head, struct binder_ptr_fixup,
2335 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2336 size_t bytes_copied = 0;
2338 while (bytes_copied < sgc->length) {
2340 size_t bytes_left = sgc->length - bytes_copied;
2341 size_t offset = sgc->offset + bytes_copied;
2344 * We copy up to the fixup (pointed to by pf)
2346 copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset)
2348 if (!ret && copy_size)
2349 ret = binder_alloc_copy_user_to_buffer(
2352 sgc->sender_uaddr + bytes_copied,
2354 bytes_copied += copy_size;
2355 if (copy_size != bytes_left) {
2357 /* we stopped at a fixup offset */
2358 if (pf->skip_size) {
2360 * we are just skipping. This is for
2361 * BINDER_TYPE_FDA where the translated
2362 * fds will be fixed up when we get
2363 * to target context.
2365 bytes_copied += pf->skip_size;
2367 /* apply the fixup indicated by pf */
2369 ret = binder_alloc_copy_to_buffer(
2373 sizeof(pf->fixup_data));
2374 bytes_copied += sizeof(pf->fixup_data);
2376 list_del(&pf->node);
2378 pf = list_first_entry_or_null(pf_head,
2379 struct binder_ptr_fixup, node);
2382 list_del(&sgc->node);
2385 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2386 BUG_ON(pf->skip_size == 0);
2387 list_del(&pf->node);
2390 BUG_ON(!list_empty(sgc_head));
2392 return ret > 0 ? -EINVAL : ret;
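/*
 * Note on the skip path (informal): a BINDER_TYPE_FDA fixup is queued
 * with fixup_data == 0 and skip_size == num_fds * sizeof(u32) (see
 * binder_translate_fd_array() below), so the loop above leaves that
 * region unwritten during this copy pass; the real fd values are
 * patched in later by binder_apply_fd_fixups() in target context.
 */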
2396 * binder_cleanup_deferred_txn_lists() - free specified lists
2397 * @sgc_head: list_head of scatter-gather copy list
2398 * @pf_head: list_head of pointer fixup list
2400 * Called to clean up @sgc_head and @pf_head if there is an error.
2403 static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head,
2404 struct list_head *pf_head)
2406 struct binder_sg_copy *sgc, *tmpsgc;
2407 struct binder_ptr_fixup *pf, *tmppf;
2409 list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) {
2410 list_del(&sgc->node);
2413 list_for_each_entry_safe(pf, tmppf, pf_head, node) {
2414 list_del(&pf->node);
2420 * binder_defer_copy() - queue a scatter-gather buffer for copy
2421 * @sgc_head: list_head of scatter-gather copy list
2422 * @offset: binder buffer offset in target process
2423 * @sender_uaddr: user address in source process
2424 * @length: bytes to copy
2426 * Specify a scatter-gather block to be copied. The actual copy must
2427 * be deferred until all the needed fixups are identified and queued.
2428 * Then the copy and fixups are done together so un-translated values
2429 * from the source are never visible in the target buffer.
2431 * We are guaranteed that repeated calls to this function will have
2432 * monotonically increasing @offset values so the list will naturally be ordered.
2435 * Return: 0=success, else -errno
2437 static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset,
2438 const void __user *sender_uaddr, size_t length)
2440 struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL);
2445 bc->offset = offset;
2446 bc->sender_uaddr = sender_uaddr;
2447 bc->length = length;
2448 INIT_LIST_HEAD(&bc->node);
2451 * We are guaranteed that the deferred copies are in-order
2452 * so just add to the tail.
2454 list_add_tail(&bc->node, sgc_head);
2460 * binder_add_fixup() - queue a fixup to be applied to sg copy
2461 * @pf_head: list_head of binder ptr fixup list
2462 * @offset: binder buffer offset in target process
2463 * @fixup: bytes to be copied for fixup
2464 * @skip_size: bytes to skip when copying (fixup will be applied later)
2466 * Add the specified fixup to a list ordered by @offset. When copying
2467 * the scatter-gather buffers, the fixup will be copied instead of
2468 * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup
2469 * will be applied later (in target process context), so we just skip
2470 * the bytes specified by @skip_size. If @skip_size is 0, we copy the fixup bytes specified by @fixup.
2473 * This function is called *mostly* in @offset order, but there are
2474 * exceptions. Since out-of-order inserts are relatively uncommon,
2475 * we insert the new element by searching backward from the tail of the list.
2478 * Return: 0=success, else -errno
2480 static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset,
2481 binder_uintptr_t fixup, size_t skip_size)
2483 struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL);
2484 struct binder_ptr_fixup *tmppf;
2489 pf->offset = offset;
2490 pf->fixup_data = fixup;
2491 pf->skip_size = skip_size;
2492 INIT_LIST_HEAD(&pf->node);
2494 /* Fixups are *mostly* added in-order, but there are some
2495 * exceptions. Look backwards through list for insertion point.
2497 list_for_each_entry_reverse(tmppf, pf_head, node) {
2498 if (tmppf->offset < pf->offset) {
2499 list_add(&pf->node, &tmppf->node);
2504 * if we get here, then the new offset is the lowest so
2505 * insert at the head
2507 list_add(&pf->node, pf_head);
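/*
 * Ordering example (hypothetical offsets): adding fixups at 0x10, 0x30
 * and then 0x20 yields the list 0x10 -> 0x20 -> 0x30; the reverse walk
 * above stops at the first entry with a smaller offset and links the
 * new node after it, or at the head if none is smaller. Out-of-order
 * adds are rare but possible because pointer and fd-array fixups are
 * discovered in object order, which need not match buffer order.
 */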
2511 static int binder_translate_fd_array(struct list_head *pf_head,
2512 struct binder_fd_array_object *fda,
2513 const void __user *sender_ubuffer,
2514 struct binder_buffer_object *parent,
2515 struct binder_buffer_object *sender_uparent,
2516 struct binder_transaction *t,
2517 struct binder_thread *thread,
2518 struct binder_transaction *in_reply_to)
2520 binder_size_t fdi, fd_buf_size;
2521 binder_size_t fda_offset;
2522 const void __user *sender_ufda_base;
2523 struct binder_proc *proc = thread->proc;
2526 if (fda->num_fds == 0)
2529 fd_buf_size = sizeof(u32) * fda->num_fds;
2530 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2531 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2532 proc->pid, thread->pid, (u64)fda->num_fds);
2535 if (fd_buf_size > parent->length ||
2536 fda->parent_offset > parent->length - fd_buf_size) {
2537 /* No space for all file descriptors here. */
2538 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2539 proc->pid, thread->pid, (u64)fda->num_fds);
2543 * the source data for binder_buffer_object is visible
2544 * to user-space and the @buffer element is the user
2545 * pointer to the buffer_object containing the fd_array.
2546 * Convert the address to an offset relative to
2547 * the base of the transaction buffer.
2549 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2551 sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer +
2554 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) ||
2555 !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) {
2556 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2557 proc->pid, thread->pid);
2560 ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32));
2564 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2566 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2567 binder_size_t sender_uoffset = fdi * sizeof(fd);
2569 ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd));
2571 ret = binder_translate_fd(fd, offset, t, thread,
2574 return ret > 0 ? -EINVAL : ret;
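/*
 * Worked example (hypothetical numbers): if the parent buffer object
 * was placed at user_data + 0x100 in the target buffer (parent->buffer
 * already holds that translated address) and fda->parent_offset is
 * 0x10, then
 *
 *   fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data)
 *                 + fda->parent_offset = 0x100 + 0x10 = 0x110
 *
 * and fd number i lives at fda_offset + i * sizeof(u32) within the
 * transaction buffer.
 */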
2579 static int binder_fixup_parent(struct list_head *pf_head,
2580 struct binder_transaction *t,
2581 struct binder_thread *thread,
2582 struct binder_buffer_object *bp,
2583 binder_size_t off_start_offset,
2584 binder_size_t num_valid,
2585 binder_size_t last_fixup_obj_off,
2586 binder_size_t last_fixup_min_off)
2588 struct binder_buffer_object *parent;
2589 struct binder_buffer *b = t->buffer;
2590 struct binder_proc *proc = thread->proc;
2591 struct binder_proc *target_proc = t->to_proc;
2592 struct binder_object object;
2593 binder_size_t buffer_offset;
2594 binder_size_t parent_offset;
2596 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2599 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2600 off_start_offset, &parent_offset,
2603 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2604 proc->pid, thread->pid);
2608 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2609 parent_offset, bp->parent_offset,
2611 last_fixup_min_off)) {
2612 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2613 proc->pid, thread->pid);
2617 if (parent->length < sizeof(binder_uintptr_t) ||
2618 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2619 /* No space for a pointer here! */
2620 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2621 proc->pid, thread->pid);
2624 buffer_offset = bp->parent_offset +
2625 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2626 return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
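/*
 * Example (hypothetical layout): a struct sent as two buffer objects,
 * where a field at parent_offset 0x8 inside the first object points at
 * the second. For the second object, bp->parent selects the first, and
 * the fixup queued here rewrites that field (expressed above as a
 * target-buffer offset) with bp->buffer, the already-translated target
 * address of the second object, so the receiver sees a pointer that is
 * valid in its own mapping.
 */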
2630 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2631 * @t: transaction to send
2632 * @proc: process to send the transaction to
2633 * @thread: thread in @proc to send the transaction to (may be NULL)
2635 * This function queues a transaction to the specified process. It will try
2636 * to find a thread in the target process to handle the transaction and
2637 * wake it up. If no thread is found, the work is queued to the proc waitlist.
2640 * If the @thread parameter is not NULL, the transaction is always queued
2641 * to the waitlist of that specific thread.
2643 * Return: 0 if the transaction was successfully queued
2644 * BR_DEAD_REPLY if the target process or thread is dead
2645 * BR_FROZEN_REPLY if the target process or thread is frozen
2647 static int binder_proc_transaction(struct binder_transaction *t,
2648 struct binder_proc *proc,
2649 struct binder_thread *thread)
2651 struct binder_node *node = t->buffer->target_node;
2652 bool oneway = !!(t->flags & TF_ONE_WAY);
2653 bool pending_async = false;
2656 binder_node_lock(node);
2659 if (node->has_async_transaction)
2660 pending_async = true;
2662 node->has_async_transaction = true;
2665 binder_inner_proc_lock(proc);
2666 if (proc->is_frozen) {
2667 proc->sync_recv |= !oneway;
2668 proc->async_recv |= oneway;
2671 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2672 (thread && thread->is_dead)) {
2673 binder_inner_proc_unlock(proc);
2674 binder_node_unlock(node);
2675 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2678 if (!thread && !pending_async)
2679 thread = binder_select_thread_ilocked(proc);
2682 binder_enqueue_thread_work_ilocked(thread, &t->work);
2683 else if (!pending_async)
2684 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2686 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2689 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2691 proc->outstanding_txns++;
2692 binder_inner_proc_unlock(proc);
2693 binder_node_unlock(node);
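/*
 * Queueing summary (informal): with @thread set, the work always goes
 * to thread->todo; otherwise a waiting thread may be selected, falling
 * back to proc->todo. An async transaction behind another pending
 * async on the same node is parked on node->async_todo and is only
 * released when the earlier buffer is freed (see binder_free_buf()).
 */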
2699 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2700 * @node: struct binder_node for which to get refs
2701 * @procp: returns @node->proc if valid
2702 * @error: set to BR_DEAD_REPLY if @node->proc is NULL
2704 * User-space normally keeps the node alive when creating a transaction
2705 * since it has a reference to the target. The local strong ref keeps it
2706 * alive if the sending process dies before the target process processes
2707 * the transaction. If the source process is malicious or has a reference
2708 * counting bug, relying on the local strong ref can fail.
2710 * Since user-space can cause the local strong ref to go away, we also take
2711 * a tmpref on the node to ensure it survives while we are constructing
2712 * the transaction. We also need a tmpref on the proc while we are
2713 * constructing the transaction, so we take that here as well.
2715 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2716 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2717 * target proc has died, @error is set to BR_DEAD_REPLY.
2719 static struct binder_node *binder_get_node_refs_for_txn(
2720 struct binder_node *node,
2721 struct binder_proc **procp,
2724 struct binder_node *target_node = NULL;
2726 binder_node_inner_lock(node);
2729 binder_inc_node_nilocked(node, 1, 0, NULL);
2730 binder_inc_node_tmpref_ilocked(node);
2731 node->proc->tmp_ref++;
2732 *procp = node->proc;
2734 *error = BR_DEAD_REPLY;
2735 binder_node_inner_unlock(node);
2740 static void binder_set_txn_from_error(struct binder_transaction *t, int id,
2741 uint32_t command, int32_t param)
2743 struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
2746 /* annotation for sparse */
2747 __release(&from->proc->inner_lock);
2751 /* don't override existing errors */
2752 if (from->ee.command == BR_OK)
2753 binder_set_extended_error(&from->ee, id, command, param);
2754 binder_inner_proc_unlock(from->proc);
2755 binder_thread_dec_tmpref(from);
2758 static void binder_transaction(struct binder_proc *proc,
2759 struct binder_thread *thread,
2760 struct binder_transaction_data *tr, int reply,
2761 binder_size_t extra_buffers_size)
2764 struct binder_transaction *t;
2765 struct binder_work *w;
2766 struct binder_work *tcomplete;
2767 binder_size_t buffer_offset = 0;
2768 binder_size_t off_start_offset, off_end_offset;
2769 binder_size_t off_min;
2770 binder_size_t sg_buf_offset, sg_buf_end_offset;
2771 binder_size_t user_offset = 0;
2772 struct binder_proc *target_proc = NULL;
2773 struct binder_thread *target_thread = NULL;
2774 struct binder_node *target_node = NULL;
2775 struct binder_transaction *in_reply_to = NULL;
2776 struct binder_transaction_log_entry *e;
2777 uint32_t return_error = 0;
2778 uint32_t return_error_param = 0;
2779 uint32_t return_error_line = 0;
2780 binder_size_t last_fixup_obj_off = 0;
2781 binder_size_t last_fixup_min_off = 0;
2782 struct binder_context *context = proc->context;
2783 int t_debug_id = atomic_inc_return(&binder_last_id);
2784 char *secctx = NULL;
2786 struct list_head sgc_head;
2787 struct list_head pf_head;
2788 const void __user *user_buffer = (const void __user *)
2789 (uintptr_t)tr->data.ptr.buffer;
2790 INIT_LIST_HEAD(&sgc_head);
2791 INIT_LIST_HEAD(&pf_head);
2793 e = binder_transaction_log_add(&binder_transaction_log);
2794 e->debug_id = t_debug_id;
2795 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2796 e->from_proc = proc->pid;
2797 e->from_thread = thread->pid;
2798 e->target_handle = tr->target.handle;
2799 e->data_size = tr->data_size;
2800 e->offsets_size = tr->offsets_size;
2801 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2803 binder_inner_proc_lock(proc);
2804 binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
2805 binder_inner_proc_unlock(proc);
2808 binder_inner_proc_lock(proc);
2809 in_reply_to = thread->transaction_stack;
2810 if (in_reply_to == NULL) {
2811 binder_inner_proc_unlock(proc);
2812 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2813 proc->pid, thread->pid);
2814 return_error = BR_FAILED_REPLY;
2815 return_error_param = -EPROTO;
2816 return_error_line = __LINE__;
2817 goto err_empty_call_stack;
2819 if (in_reply_to->to_thread != thread) {
2820 spin_lock(&in_reply_to->lock);
2821 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2822 proc->pid, thread->pid, in_reply_to->debug_id,
2823 in_reply_to->to_proc ?
2824 in_reply_to->to_proc->pid : 0,
2825 in_reply_to->to_thread ?
2826 in_reply_to->to_thread->pid : 0);
2827 spin_unlock(&in_reply_to->lock);
2828 binder_inner_proc_unlock(proc);
2829 return_error = BR_FAILED_REPLY;
2830 return_error_param = -EPROTO;
2831 return_error_line = __LINE__;
2833 goto err_bad_call_stack;
2835 thread->transaction_stack = in_reply_to->to_parent;
2836 binder_inner_proc_unlock(proc);
2837 binder_set_nice(in_reply_to->saved_priority);
2838 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2839 if (target_thread == NULL) {
2840 /* annotation for sparse */
2841 __release(&target_thread->proc->inner_lock);
2842 binder_txn_error("%d:%d reply target not found\n",
2843 thread->pid, proc->pid);
2844 return_error = BR_DEAD_REPLY;
2845 return_error_line = __LINE__;
2846 goto err_dead_binder;
2848 if (target_thread->transaction_stack != in_reply_to) {
2849 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2850 proc->pid, thread->pid,
2851 target_thread->transaction_stack ?
2852 target_thread->transaction_stack->debug_id : 0,
2853 in_reply_to->debug_id);
2854 binder_inner_proc_unlock(target_thread->proc);
2855 return_error = BR_FAILED_REPLY;
2856 return_error_param = -EPROTO;
2857 return_error_line = __LINE__;
2859 target_thread = NULL;
2860 goto err_dead_binder;
2862 target_proc = target_thread->proc;
2863 target_proc->tmp_ref++;
2864 binder_inner_proc_unlock(target_thread->proc);
2866 if (tr->target.handle) {
2867 struct binder_ref *ref;
2870 * There must already be a strong ref
2871 * on this node. If so, do a strong
2872 * increment on the node to ensure it
2873 * stays alive until the transaction is complete.
2876 binder_proc_lock(proc);
2877 ref = binder_get_ref_olocked(proc, tr->target.handle,
2880 target_node = binder_get_node_refs_for_txn(
2881 ref->node, &target_proc,
2884 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2885 proc->pid, thread->pid, tr->target.handle);
2886 return_error = BR_FAILED_REPLY;
2888 binder_proc_unlock(proc);
2890 mutex_lock(&context->context_mgr_node_lock);
2891 target_node = context->binder_context_mgr_node;
2893 target_node = binder_get_node_refs_for_txn(
2894 target_node, &target_proc,
2897 return_error = BR_DEAD_REPLY;
2898 mutex_unlock(&context->context_mgr_node_lock);
2899 if (target_node && target_proc->pid == proc->pid) {
2900 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2901 proc->pid, thread->pid);
2902 return_error = BR_FAILED_REPLY;
2903 return_error_param = -EINVAL;
2904 return_error_line = __LINE__;
2905 goto err_invalid_target_handle;
2909 binder_txn_error("%d:%d cannot find target node\n",
2910 thread->pid, proc->pid);
2912 * return_error is set above
2914 return_error_param = -EINVAL;
2915 return_error_line = __LINE__;
2916 goto err_dead_binder;
2918 e->to_node = target_node->debug_id;
2919 if (WARN_ON(proc == target_proc)) {
2920 binder_txn_error("%d:%d self transactions not allowed\n",
2921 thread->pid, proc->pid);
2922 return_error = BR_FAILED_REPLY;
2923 return_error_param = -EINVAL;
2924 return_error_line = __LINE__;
2925 goto err_invalid_target_handle;
2927 if (security_binder_transaction(proc->cred,
2928 target_proc->cred) < 0) {
2929 binder_txn_error("%d:%d transaction credentials failed\n",
2930 thread->pid, proc->pid);
2931 return_error = BR_FAILED_REPLY;
2932 return_error_param = -EPERM;
2933 return_error_line = __LINE__;
2934 goto err_invalid_target_handle;
2936 binder_inner_proc_lock(proc);
2938 w = list_first_entry_or_null(&thread->todo,
2939 struct binder_work, entry);
2940 if (!(tr->flags & TF_ONE_WAY) && w &&
2941 w->type == BINDER_WORK_TRANSACTION) {
2943 * Do not allow new outgoing transaction from a
2944 * thread that has a transaction at the head of
2945 * its todo list. Only need to check the head
2946 * because binder_select_thread_ilocked picks a
2947 * thread from proc->waiting_threads to enqueue
2948 * the transaction, and nothing is queued to the
2949 * todo list while the thread is on waiting_threads.
2951 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2952 proc->pid, thread->pid);
2953 binder_inner_proc_unlock(proc);
2954 return_error = BR_FAILED_REPLY;
2955 return_error_param = -EPROTO;
2956 return_error_line = __LINE__;
2957 goto err_bad_todo_list;
2960 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2961 struct binder_transaction *tmp;
2963 tmp = thread->transaction_stack;
2964 if (tmp->to_thread != thread) {
2965 spin_lock(&tmp->lock);
2966 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2967 proc->pid, thread->pid, tmp->debug_id,
2968 tmp->to_proc ? tmp->to_proc->pid : 0,
2970 tmp->to_thread->pid : 0);
2971 spin_unlock(&tmp->lock);
2972 binder_inner_proc_unlock(proc);
2973 return_error = BR_FAILED_REPLY;
2974 return_error_param = -EPROTO;
2975 return_error_line = __LINE__;
2976 goto err_bad_call_stack;
2979 struct binder_thread *from;
2981 spin_lock(&tmp->lock);
2983 if (from && from->proc == target_proc) {
2984 atomic_inc(&from->tmp_ref);
2985 target_thread = from;
2986 spin_unlock(&tmp->lock);
2989 spin_unlock(&tmp->lock);
2990 tmp = tmp->from_parent;
2993 binder_inner_proc_unlock(proc);
2996 e->to_thread = target_thread->pid;
2997 e->to_proc = target_proc->pid;
2999 /* TODO: reuse incoming transaction for reply */
3000 t = kzalloc(sizeof(*t), GFP_KERNEL);
3002 binder_txn_error("%d:%d cannot allocate transaction\n",
3003 thread->pid, proc->pid);
3004 return_error = BR_FAILED_REPLY;
3005 return_error_param = -ENOMEM;
3006 return_error_line = __LINE__;
3007 goto err_alloc_t_failed;
3009 INIT_LIST_HEAD(&t->fd_fixups);
3010 binder_stats_created(BINDER_STAT_TRANSACTION);
3011 spin_lock_init(&t->lock);
3013 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
3014 if (tcomplete == NULL) {
3015 binder_txn_error("%d:%d cannot allocate work for transaction\n",
3016 thread->pid, proc->pid);
3017 return_error = BR_FAILED_REPLY;
3018 return_error_param = -ENOMEM;
3019 return_error_line = __LINE__;
3020 goto err_alloc_tcomplete_failed;
3022 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
3024 t->debug_id = t_debug_id;
3027 binder_debug(BINDER_DEBUG_TRANSACTION,
3028 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
3029 proc->pid, thread->pid, t->debug_id,
3030 target_proc->pid, target_thread->pid,
3031 (u64)tr->data.ptr.buffer,
3032 (u64)tr->data.ptr.offsets,
3033 (u64)tr->data_size, (u64)tr->offsets_size,
3034 (u64)extra_buffers_size);
3036 binder_debug(BINDER_DEBUG_TRANSACTION,
3037 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
3038 proc->pid, thread->pid, t->debug_id,
3039 target_proc->pid, target_node->debug_id,
3040 (u64)tr->data.ptr.buffer,
3041 (u64)tr->data.ptr.offsets,
3042 (u64)tr->data_size, (u64)tr->offsets_size,
3043 (u64)extra_buffers_size);
3045 if (!reply && !(tr->flags & TF_ONE_WAY))
3049 t->sender_euid = task_euid(proc->tsk);
3050 t->to_proc = target_proc;
3051 t->to_thread = target_thread;
3053 t->flags = tr->flags;
3054 t->priority = task_nice(current);
3056 if (target_node && target_node->txn_security_ctx) {
3060 security_cred_getsecid(proc->cred, &secid);
3061 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
3063 binder_txn_error("%d:%d failed to get security context\n",
3064 thread->pid, proc->pid);
3065 return_error = BR_FAILED_REPLY;
3066 return_error_param = ret;
3067 return_error_line = __LINE__;
3068 goto err_get_secctx_failed;
3070 added_size = ALIGN(secctx_sz, sizeof(u64));
3071 extra_buffers_size += added_size;
3072 if (extra_buffers_size < added_size) {
3073 binder_txn_error("%d:%d integer overflow of extra_buffers_size\n",
3074 thread->pid, proc->pid);
3075 return_error = BR_FAILED_REPLY;
3076 return_error_param = -EINVAL;
3077 return_error_line = __LINE__;
3078 goto err_bad_extra_size;
3082 trace_binder_transaction(reply, t, target_node);
3084 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
3085 tr->offsets_size, extra_buffers_size,
3086 !reply && (t->flags & TF_ONE_WAY), current->tgid);
3087 if (IS_ERR(t->buffer)) {
3090 ret = PTR_ERR(t->buffer);
3091 s = (ret == -ESRCH) ? ": vma cleared, target dead or dying"
3092 : (ret == -ENOSPC) ? ": no space left"
3093 : (ret == -ENOMEM) ? ": memory allocation failed"
3095 binder_txn_error("cannot allocate buffer%s", s);
3097 return_error_param = PTR_ERR(t->buffer);
3098 return_error = return_error_param == -ESRCH ?
3099 BR_DEAD_REPLY : BR_FAILED_REPLY;
3100 return_error_line = __LINE__;
3102 goto err_binder_alloc_buf_failed;
3106 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
3107 ALIGN(tr->offsets_size, sizeof(void *)) +
3108 ALIGN(extra_buffers_size, sizeof(void *)) -
3109 ALIGN(secctx_sz, sizeof(u64));
3111 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
3112 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
3113 t->buffer, buf_offset,
3116 t->security_ctx = 0;
3119 security_release_secctx(secctx, secctx_sz);
3122 t->buffer->debug_id = t->debug_id;
3123 t->buffer->transaction = t;
3124 t->buffer->target_node = target_node;
3125 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
3126 trace_binder_transaction_alloc_buf(t->buffer);
3128 if (binder_alloc_copy_user_to_buffer(
3129 &target_proc->alloc,
3131 ALIGN(tr->data_size, sizeof(void *)),
3132 (const void __user *)
3133 (uintptr_t)tr->data.ptr.offsets,
3134 tr->offsets_size)) {
3135 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3136 proc->pid, thread->pid);
3137 return_error = BR_FAILED_REPLY;
3138 return_error_param = -EFAULT;
3139 return_error_line = __LINE__;
3140 goto err_copy_data_failed;
3142 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
3143 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
3144 proc->pid, thread->pid, (u64)tr->offsets_size);
3145 return_error = BR_FAILED_REPLY;
3146 return_error_param = -EINVAL;
3147 return_error_line = __LINE__;
3148 goto err_bad_offset;
3150 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
3151 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
3152 proc->pid, thread->pid,
3153 (u64)extra_buffers_size);
3154 return_error = BR_FAILED_REPLY;
3155 return_error_param = -EINVAL;
3156 return_error_line = __LINE__;
3157 goto err_bad_offset;
3159 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
3160 buffer_offset = off_start_offset;
3161 off_end_offset = off_start_offset + tr->offsets_size;
3162 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
3163 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
3164 ALIGN(secctx_sz, sizeof(u64));
3166 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
3167 buffer_offset += sizeof(binder_size_t)) {
3168 struct binder_object_header *hdr;
3170 struct binder_object object;
3171 binder_size_t object_offset;
3172 binder_size_t copy_size;
3174 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
3178 sizeof(object_offset))) {
3179 binder_txn_error("%d:%d copy offset from buffer failed\n",
3180 thread->pid, proc->pid);
3181 return_error = BR_FAILED_REPLY;
3182 return_error_param = -EINVAL;
3183 return_error_line = __LINE__;
3184 goto err_bad_offset;
3188 * Copy the source user buffer up to the next object
3189 * that will be processed.
3191 copy_size = object_offset - user_offset;
3192 if (copy_size && (user_offset > object_offset ||
3193 binder_alloc_copy_user_to_buffer(
3194 &target_proc->alloc,
3195 t->buffer, user_offset,
3196 user_buffer + user_offset,
3198 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3199 proc->pid, thread->pid);
3200 return_error = BR_FAILED_REPLY;
3201 return_error_param = -EFAULT;
3202 return_error_line = __LINE__;
3203 goto err_copy_data_failed;
3205 object_size = binder_get_object(target_proc, user_buffer,
3206 t->buffer, object_offset, &object);
3207 if (object_size == 0 || object_offset < off_min) {
3208 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
3209 proc->pid, thread->pid,
3212 (u64)t->buffer->data_size);
3213 return_error = BR_FAILED_REPLY;
3214 return_error_param = -EINVAL;
3215 return_error_line = __LINE__;
3216 goto err_bad_offset;
3220 * Set offset to the next buffer fragment to be copied.
3222 user_offset = object_offset + object_size;
3225 off_min = object_offset + object_size;
3226 switch (hdr->type) {
3227 case BINDER_TYPE_BINDER:
3228 case BINDER_TYPE_WEAK_BINDER: {
3229 struct flat_binder_object *fp;
3231 fp = to_flat_binder_object(hdr);
3232 ret = binder_translate_binder(fp, t, thread);
3235 binder_alloc_copy_to_buffer(&target_proc->alloc,
3239 binder_txn_error("%d:%d translate binder failed\n",
3240 thread->pid, proc->pid);
3241 return_error = BR_FAILED_REPLY;
3242 return_error_param = ret;
3243 return_error_line = __LINE__;
3244 goto err_translate_failed;
3247 case BINDER_TYPE_HANDLE:
3248 case BINDER_TYPE_WEAK_HANDLE: {
3249 struct flat_binder_object *fp;
3251 fp = to_flat_binder_object(hdr);
3252 ret = binder_translate_handle(fp, t, thread);
3254 binder_alloc_copy_to_buffer(&target_proc->alloc,
3258 binder_txn_error("%d:%d translate handle failed\n",
3259 thread->pid, proc->pid);
3260 return_error = BR_FAILED_REPLY;
3261 return_error_param = ret;
3262 return_error_line = __LINE__;
3263 goto err_translate_failed;
3267 case BINDER_TYPE_FD: {
3268 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3269 binder_size_t fd_offset = object_offset +
3270 (uintptr_t)&fp->fd - (uintptr_t)fp;
3271 int ret = binder_translate_fd(fp->fd, fd_offset, t,
3272 thread, in_reply_to);
3276 binder_alloc_copy_to_buffer(&target_proc->alloc,
3280 binder_txn_error("%d:%d translate fd failed\n",
3281 thread->pid, proc->pid);
3282 return_error = BR_FAILED_REPLY;
3283 return_error_param = ret;
3284 return_error_line = __LINE__;
3285 goto err_translate_failed;
3288 case BINDER_TYPE_FDA: {
3289 struct binder_object ptr_object;
3290 binder_size_t parent_offset;
3291 struct binder_object user_object;
3292 size_t user_parent_size;
3293 struct binder_fd_array_object *fda =
3294 to_binder_fd_array_object(hdr);
3295 size_t num_valid = (buffer_offset - off_start_offset) /
3296 sizeof(binder_size_t);
3297 struct binder_buffer_object *parent =
3298 binder_validate_ptr(target_proc, t->buffer,
3299 &ptr_object, fda->parent,
3304 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3305 proc->pid, thread->pid);
3306 return_error = BR_FAILED_REPLY;
3307 return_error_param = -EINVAL;
3308 return_error_line = __LINE__;
3309 goto err_bad_parent;
3311 if (!binder_validate_fixup(target_proc, t->buffer,
3316 last_fixup_min_off)) {
3317 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3318 proc->pid, thread->pid);
3319 return_error = BR_FAILED_REPLY;
3320 return_error_param = -EINVAL;
3321 return_error_line = __LINE__;
3322 goto err_bad_parent;
3325 * We need to read the user version of the parent
3326 * object to get the original user offset
3329 binder_get_object(proc, user_buffer, t->buffer,
3330 parent_offset, &user_object);
3331 if (user_parent_size != sizeof(user_object.bbo)) {
3332 binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n",
3333 proc->pid, thread->pid,
3335 sizeof(user_object.bbo));
3336 return_error = BR_FAILED_REPLY;
3337 return_error_param = -EINVAL;
3338 return_error_line = __LINE__;
3339 goto err_bad_parent;
3341 ret = binder_translate_fd_array(&pf_head, fda,
3342 user_buffer, parent,
3343 &user_object.bbo, t,
3344 thread, in_reply_to);
3346 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3351 binder_txn_error("%d:%d translate fd array failed\n",
3352 thread->pid, proc->pid);
3353 return_error = BR_FAILED_REPLY;
3354 return_error_param = ret > 0 ? -EINVAL : ret;
3355 return_error_line = __LINE__;
3356 goto err_translate_failed;
3358 last_fixup_obj_off = parent_offset;
3359 last_fixup_min_off =
3360 fda->parent_offset + sizeof(u32) * fda->num_fds;
3362 case BINDER_TYPE_PTR: {
3363 struct binder_buffer_object *bp =
3364 to_binder_buffer_object(hdr);
3365 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3368 if (bp->length > buf_left) {
3369 binder_user_error("%d:%d got transaction with too large buffer\n",
3370 proc->pid, thread->pid);
3371 return_error = BR_FAILED_REPLY;
3372 return_error_param = -EINVAL;
3373 return_error_line = __LINE__;
3374 goto err_bad_offset;
3376 ret = binder_defer_copy(&sgc_head, sg_buf_offset,
3377 (const void __user *)(uintptr_t)bp->buffer,
3380 binder_txn_error("%d:%d deferred copy failed\n",
3381 thread->pid, proc->pid);
3382 return_error = BR_FAILED_REPLY;
3383 return_error_param = ret;
3384 return_error_line = __LINE__;
3385 goto err_translate_failed;
3387 /* Fixup buffer pointer to target proc address space */
3388 bp->buffer = (uintptr_t)
3389 t->buffer->user_data + sg_buf_offset;
3390 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3392 num_valid = (buffer_offset - off_start_offset) /
3393 sizeof(binder_size_t);
3394 ret = binder_fixup_parent(&pf_head, t,
3399 last_fixup_min_off);
3401 binder_alloc_copy_to_buffer(&target_proc->alloc,
3405 binder_txn_error("%d:%d failed to fixup parent\n",
3406 thread->pid, proc->pid);
3407 return_error = BR_FAILED_REPLY;
3408 return_error_param = ret;
3409 return_error_line = __LINE__;
3410 goto err_translate_failed;
3412 last_fixup_obj_off = object_offset;
3413 last_fixup_min_off = 0;
3416 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3417 proc->pid, thread->pid, hdr->type);
3418 return_error = BR_FAILED_REPLY;
3419 return_error_param = -EINVAL;
3420 return_error_line = __LINE__;
3421 goto err_bad_object_type;
3424 /* Done processing objects, copy the rest of the buffer */
3425 if (binder_alloc_copy_user_to_buffer(
3426 &target_proc->alloc,
3427 t->buffer, user_offset,
3428 user_buffer + user_offset,
3429 tr->data_size - user_offset)) {
3430 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3431 proc->pid, thread->pid);
3432 return_error = BR_FAILED_REPLY;
3433 return_error_param = -EFAULT;
3434 return_error_line = __LINE__;
3435 goto err_copy_data_failed;
3438 ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer,
3439 &sgc_head, &pf_head);
3441 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3442 proc->pid, thread->pid);
3443 return_error = BR_FAILED_REPLY;
3444 return_error_param = ret;
3445 return_error_line = __LINE__;
3446 goto err_copy_data_failed;
3448 if (t->buffer->oneway_spam_suspect)
3449 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3451 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3452 t->work.type = BINDER_WORK_TRANSACTION;
3455 binder_enqueue_thread_work(thread, tcomplete);
3456 binder_inner_proc_lock(target_proc);
3457 if (target_thread->is_dead) {
3458 return_error = BR_DEAD_REPLY;
3459 binder_inner_proc_unlock(target_proc);
3460 goto err_dead_proc_or_thread;
3462 BUG_ON(t->buffer->async_transaction != 0);
3463 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3464 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3465 target_proc->outstanding_txns++;
3466 binder_inner_proc_unlock(target_proc);
3467 wake_up_interruptible_sync(&target_thread->wait);
3468 binder_free_transaction(in_reply_to);
3469 } else if (!(t->flags & TF_ONE_WAY)) {
3470 BUG_ON(t->buffer->async_transaction != 0);
3471 binder_inner_proc_lock(proc);
3473 * Defer the TRANSACTION_COMPLETE, so we don't return to
3474 * userspace immediately; this allows the target process to
3475 * immediately start processing this transaction, reducing
3476 * latency. We will then return the TRANSACTION_COMPLETE when
3477 * the target replies (or there is an error).
3479 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3481 t->from_parent = thread->transaction_stack;
3482 thread->transaction_stack = t;
3483 binder_inner_proc_unlock(proc);
3484 return_error = binder_proc_transaction(t,
3485 target_proc, target_thread);
3487 binder_inner_proc_lock(proc);
3488 binder_pop_transaction_ilocked(thread, t);
3489 binder_inner_proc_unlock(proc);
3490 goto err_dead_proc_or_thread;
3493 BUG_ON(target_node == NULL);
3494 BUG_ON(t->buffer->async_transaction != 1);
3495 binder_enqueue_thread_work(thread, tcomplete);
3496 return_error = binder_proc_transaction(t, target_proc, NULL);
3498 goto err_dead_proc_or_thread;
3501 binder_thread_dec_tmpref(target_thread);
3502 binder_proc_dec_tmpref(target_proc);
3504 binder_dec_node_tmpref(target_node);
3506 * write barrier to synchronize with initialization of log entry
3510 WRITE_ONCE(e->debug_id_done, t_debug_id);
3513 err_dead_proc_or_thread:
3514 binder_txn_error("%d:%d dead process or thread\n",
3515 thread->pid, proc->pid);
3516 return_error_line = __LINE__;
3517 binder_dequeue_work(proc, tcomplete);
3518 err_translate_failed:
3519 err_bad_object_type:
3522 err_copy_data_failed:
3523 binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head);
3524 binder_free_txn_fixups(t);
3525 trace_binder_transaction_failed_buffer_release(t->buffer);
3526 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3527 buffer_offset, true);
3529 binder_dec_node_tmpref(target_node);
3531 t->buffer->transaction = NULL;
3532 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3533 err_binder_alloc_buf_failed:
3536 security_release_secctx(secctx, secctx_sz);
3537 err_get_secctx_failed:
3539 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3540 err_alloc_tcomplete_failed:
3541 if (trace_binder_txn_latency_free_enabled())
3542 binder_txn_latency_free(t);
3544 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3548 err_empty_call_stack:
3550 err_invalid_target_handle:
3552 binder_dec_node(target_node, 1, 0);
3553 binder_dec_node_tmpref(target_node);
3556 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3557 "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n",
3558 proc->pid, thread->pid, reply ? "reply" :
3559 (tr->flags & TF_ONE_WAY ? "async" : "call"),
3560 target_proc ? target_proc->pid : 0,
3561 target_thread ? target_thread->pid : 0,
3562 t_debug_id, return_error, return_error_param,
3563 (u64)tr->data_size, (u64)tr->offsets_size,
3567 binder_thread_dec_tmpref(target_thread);
3569 binder_proc_dec_tmpref(target_proc);
3572 struct binder_transaction_log_entry *fe;
3574 e->return_error = return_error;
3575 e->return_error_param = return_error_param;
3576 e->return_error_line = return_error_line;
3577 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3580 * write barrier to synchronize with initialization of log entry
3584 WRITE_ONCE(e->debug_id_done, t_debug_id);
3585 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3588 BUG_ON(thread->return_error.cmd != BR_OK);
3590 binder_set_txn_from_error(in_reply_to, t_debug_id,
3591 return_error, return_error_param);
3592 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3593 binder_enqueue_thread_work(thread, &thread->return_error.work);
3594 binder_send_failed_reply(in_reply_to, return_error);
3596 binder_inner_proc_lock(proc);
3597 binder_set_extended_error(&thread->ee, t_debug_id,
3598 return_error, return_error_param);
3599 binder_inner_proc_unlock(proc);
3600 thread->return_error.cmd = return_error;
3601 binder_enqueue_thread_work(thread, &thread->return_error.work);
3606 * binder_free_buf() - free the specified buffer
3607 * @proc: binder proc that owns the buffer
 * @thread: binder thread performing the buffer release
3608 * @buffer: buffer to be freed
3609 * @is_failure: true if the transaction failed to send
3611 * If the buffer is for an async transaction, enqueue the next async
3612 * transaction from the node.
3614 * Clean up the buffer and free it.
3617 binder_free_buf(struct binder_proc *proc,
3618 struct binder_thread *thread,
3619 struct binder_buffer *buffer, bool is_failure)
3621 binder_inner_proc_lock(proc);
3622 if (buffer->transaction) {
3623 buffer->transaction->buffer = NULL;
3624 buffer->transaction = NULL;
3626 binder_inner_proc_unlock(proc);
3627 if (buffer->async_transaction && buffer->target_node) {
3628 struct binder_node *buf_node;
3629 struct binder_work *w;
3631 buf_node = buffer->target_node;
3632 binder_node_inner_lock(buf_node);
3633 BUG_ON(!buf_node->has_async_transaction);
3634 BUG_ON(buf_node->proc != proc);
3635 w = binder_dequeue_work_head_ilocked(
3636 &buf_node->async_todo);
3638 buf_node->has_async_transaction = false;
3640 binder_enqueue_work_ilocked(
3642 binder_wakeup_proc_ilocked(proc);
3644 binder_node_inner_unlock(buf_node);
3646 trace_binder_transaction_buffer_release(buffer);
3647 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3648 binder_alloc_free_buf(&proc->alloc, buffer);
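/*
 * Async throttling sketch (informal): freeing the in-flight buffer of
 * a oneway transaction above dequeues the next BINDER_WORK_TRANSACTION
 * from node->async_todo onto proc->todo and wakes the proc, so oneway
 * calls to a single node are processed one buffer at a time.
 */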
3651 static int binder_thread_write(struct binder_proc *proc,
3652 struct binder_thread *thread,
3653 binder_uintptr_t binder_buffer, size_t size,
3654 binder_size_t *consumed)
3657 struct binder_context *context = proc->context;
3658 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3659 void __user *ptr = buffer + *consumed;
3660 void __user *end = buffer + size;
3662 while (ptr < end && thread->return_error.cmd == BR_OK) {
3665 if (get_user(cmd, (uint32_t __user *)ptr))
3667 ptr += sizeof(uint32_t);
3668 trace_binder_command(cmd);
3669 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3670 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3671 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3672 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3680 const char *debug_string;
3681 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3682 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3683 struct binder_ref_data rdata;
3685 if (get_user(target, (uint32_t __user *)ptr))
3688 ptr += sizeof(uint32_t);
3690 if (increment && !target) {
3691 struct binder_node *ctx_mgr_node;
3693 mutex_lock(&context->context_mgr_node_lock);
3694 ctx_mgr_node = context->binder_context_mgr_node;
3696 if (ctx_mgr_node->proc == proc) {
3697 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3698 proc->pid, thread->pid);
3699 mutex_unlock(&context->context_mgr_node_lock);
3702 ret = binder_inc_ref_for_node(
3704 strong, NULL, &rdata);
3706 mutex_unlock(&context->context_mgr_node_lock);
3709 ret = binder_update_ref_for_handle(
3710 proc, target, increment, strong,
3712 if (!ret && rdata.desc != target) {
3713 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3714 proc->pid, thread->pid,
3715 target, rdata.desc);
3719 debug_string = "IncRefs";
3722 debug_string = "Acquire";
3725 debug_string = "Release";
3729 debug_string = "DecRefs";
3733 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3734 proc->pid, thread->pid, debug_string,
3735 strong, target, ret);
3738 binder_debug(BINDER_DEBUG_USER_REFS,
3739 "%d:%d %s ref %d desc %d s %d w %d\n",
3740 proc->pid, thread->pid, debug_string,
3741 rdata.debug_id, rdata.desc, rdata.strong,
3745 case BC_INCREFS_DONE:
3746 case BC_ACQUIRE_DONE: {
3747 binder_uintptr_t node_ptr;
3748 binder_uintptr_t cookie;
3749 struct binder_node *node;
3752 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3754 ptr += sizeof(binder_uintptr_t);
3755 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3757 ptr += sizeof(binder_uintptr_t);
3758 node = binder_get_node(proc, node_ptr);
3760 binder_user_error("%d:%d %s u%016llx no match\n",
3761 proc->pid, thread->pid,
3762 cmd == BC_INCREFS_DONE ?
3768 if (cookie != node->cookie) {
3769 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3770 proc->pid, thread->pid,
3771 cmd == BC_INCREFS_DONE ?
3772 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3773 (u64)node_ptr, node->debug_id,
3774 (u64)cookie, (u64)node->cookie);
3775 binder_put_node(node);
3778 binder_node_inner_lock(node);
3779 if (cmd == BC_ACQUIRE_DONE) {
3780 if (node->pending_strong_ref == 0) {
3781 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3782 proc->pid, thread->pid,
3784 binder_node_inner_unlock(node);
3785 binder_put_node(node);
3788 node->pending_strong_ref = 0;
3790 if (node->pending_weak_ref == 0) {
3791 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3792 proc->pid, thread->pid,
3794 binder_node_inner_unlock(node);
3795 binder_put_node(node);
3798 node->pending_weak_ref = 0;
3800 free_node = binder_dec_node_nilocked(node,
3801 cmd == BC_ACQUIRE_DONE, 0);
3803 binder_debug(BINDER_DEBUG_USER_REFS,
3804 "%d:%d %s node %d ls %d lw %d tr %d\n",
3805 proc->pid, thread->pid,
3806 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3807 node->debug_id, node->local_strong_refs,
3808 node->local_weak_refs, node->tmp_refs);
3809 binder_node_inner_unlock(node);
3810 binder_put_node(node);
3813 case BC_ATTEMPT_ACQUIRE:
3814 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3816 case BC_ACQUIRE_RESULT:
3817 pr_err("BC_ACQUIRE_RESULT not supported\n");
3820 case BC_FREE_BUFFER: {
3821 binder_uintptr_t data_ptr;
3822 struct binder_buffer *buffer;
3824 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3826 ptr += sizeof(binder_uintptr_t);
3828 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3830 if (IS_ERR_OR_NULL(buffer)) {
3831 if (PTR_ERR(buffer) == -EPERM) {
3833 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3834 proc->pid, thread->pid,
3838 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3839 proc->pid, thread->pid,
3844 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3845 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3846 proc->pid, thread->pid, (u64)data_ptr,
3848 buffer->transaction ? "active" : "finished");
3849 binder_free_buf(proc, thread, buffer, false);
3853 case BC_TRANSACTION_SG:
3855 struct binder_transaction_data_sg tr;
3857 if (copy_from_user(&tr, ptr, sizeof(tr)))
3860 binder_transaction(proc, thread, &tr.transaction_data,
3861 cmd == BC_REPLY_SG, tr.buffers_size);
3864 case BC_TRANSACTION:
3866 struct binder_transaction_data tr;
3868 if (copy_from_user(&tr, ptr, sizeof(tr)))
3871 binder_transaction(proc, thread, &tr,
3872 cmd == BC_REPLY, 0);
3876 case BC_REGISTER_LOOPER:
3877 binder_debug(BINDER_DEBUG_THREADS,
3878 "%d:%d BC_REGISTER_LOOPER\n",
3879 proc->pid, thread->pid);
3880 binder_inner_proc_lock(proc);
3881 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3882 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3883 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3884 proc->pid, thread->pid);
3885 } else if (proc->requested_threads == 0) {
3886 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3887 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3888 proc->pid, thread->pid);
3890 proc->requested_threads--;
3891 proc->requested_threads_started++;
3893 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3894 binder_inner_proc_unlock(proc);
3896 case BC_ENTER_LOOPER:
3897 binder_debug(BINDER_DEBUG_THREADS,
3898 "%d:%d BC_ENTER_LOOPER\n",
3899 proc->pid, thread->pid);
3900 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3901 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3902 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3903 proc->pid, thread->pid);
3905 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3907 case BC_EXIT_LOOPER:
3908 binder_debug(BINDER_DEBUG_THREADS,
3909 "%d:%d BC_EXIT_LOOPER\n",
3910 proc->pid, thread->pid);
3911 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3914 case BC_REQUEST_DEATH_NOTIFICATION:
3915 case BC_CLEAR_DEATH_NOTIFICATION: {
3917 binder_uintptr_t cookie;
3918 struct binder_ref *ref;
3919 struct binder_ref_death *death = NULL;
3921 if (get_user(target, (uint32_t __user *)ptr))
3923 ptr += sizeof(uint32_t);
3924 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3926 ptr += sizeof(binder_uintptr_t);
3927 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3929 * Allocate memory for death notification
3930 * before taking lock
3932 death = kzalloc(sizeof(*death), GFP_KERNEL);
3933 if (death == NULL) {
3934 WARN_ON(thread->return_error.cmd !=
3936 thread->return_error.cmd = BR_ERROR;
3937 binder_enqueue_thread_work(
3939 &thread->return_error.work);
3941 BINDER_DEBUG_FAILED_TRANSACTION,
3942 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3943 proc->pid, thread->pid);
3947 binder_proc_lock(proc);
3948 ref = binder_get_ref_olocked(proc, target, false);
3950 binder_user_error("%d:%d %s invalid ref %d\n",
3951 proc->pid, thread->pid,
3952 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3953 "BC_REQUEST_DEATH_NOTIFICATION" :
3954 "BC_CLEAR_DEATH_NOTIFICATION",
3956 binder_proc_unlock(proc);
3961 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3962 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3963 proc->pid, thread->pid,
3964 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3965 "BC_REQUEST_DEATH_NOTIFICATION" :
3966 "BC_CLEAR_DEATH_NOTIFICATION",
3967 (u64)cookie, ref->data.debug_id,
3968 ref->data.desc, ref->data.strong,
3969 ref->data.weak, ref->node->debug_id);
3971 binder_node_lock(ref->node);
3972 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3974 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3975 proc->pid, thread->pid);
3976 binder_node_unlock(ref->node);
3977 binder_proc_unlock(proc);
3981 binder_stats_created(BINDER_STAT_DEATH);
3982 INIT_LIST_HEAD(&death->work.entry);
3983 death->cookie = cookie;
3985 if (ref->node->proc == NULL) {
3986 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3988 binder_inner_proc_lock(proc);
3989 binder_enqueue_work_ilocked(
3990 &ref->death->work, &proc->todo);
3991 binder_wakeup_proc_ilocked(proc);
3992 binder_inner_proc_unlock(proc);
3995 if (ref->death == NULL) {
3996 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3997 proc->pid, thread->pid);
3998 binder_node_unlock(ref->node);
3999 binder_proc_unlock(proc);
4003 if (death->cookie != cookie) {
4004 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
4005 proc->pid, thread->pid,
4008 binder_node_unlock(ref->node);
4009 binder_proc_unlock(proc);
4013 binder_inner_proc_lock(proc);
4014 if (list_empty(&death->work.entry)) {
4015 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4016 if (thread->looper &
4017 (BINDER_LOOPER_STATE_REGISTERED |
4018 BINDER_LOOPER_STATE_ENTERED))
4019 binder_enqueue_thread_work_ilocked(
4023 binder_enqueue_work_ilocked(
4026 binder_wakeup_proc_ilocked(
4030 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
4031 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
4033 binder_inner_proc_unlock(proc);
4035 binder_node_unlock(ref->node);
4036 binder_proc_unlock(proc);
4038 case BC_DEAD_BINDER_DONE: {
4039 struct binder_work *w;
4040 binder_uintptr_t cookie;
4041 struct binder_ref_death *death = NULL;
4043 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
4046 ptr += sizeof(cookie);
4047 binder_inner_proc_lock(proc);
4048 list_for_each_entry(w, &proc->delivered_death,
4050 struct binder_ref_death *tmp_death =
4052 struct binder_ref_death,
4055 if (tmp_death->cookie == cookie) {
4060 binder_debug(BINDER_DEBUG_DEAD_BINDER,
4061 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
4062 proc->pid, thread->pid, (u64)cookie,
4064 if (death == NULL) {
4065 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
4066 proc->pid, thread->pid, (u64)cookie);
4067 binder_inner_proc_unlock(proc);
4070 binder_dequeue_work_ilocked(&death->work);
4071 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
4072 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
4073 if (thread->looper &
4074 (BINDER_LOOPER_STATE_REGISTERED |
4075 BINDER_LOOPER_STATE_ENTERED))
4076 binder_enqueue_thread_work_ilocked(
4077 thread, &death->work);
4079 binder_enqueue_work_ilocked(
4082 binder_wakeup_proc_ilocked(proc);
4085 binder_inner_proc_unlock(proc);
4089 pr_err("%d:%d unknown command %u\n",
4090 proc->pid, thread->pid, cmd);
4093 *consumed = ptr - buffer;
4098 static void binder_stat_br(struct binder_proc *proc,
4099 struct binder_thread *thread, uint32_t cmd)
4101 trace_binder_return(cmd);
4102 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
4103 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
4104 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
4105 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
4109 static int binder_put_node_cmd(struct binder_proc *proc,
4110 struct binder_thread *thread,
4112 binder_uintptr_t node_ptr,
4113 binder_uintptr_t node_cookie,
4115 uint32_t cmd, const char *cmd_name)
4117 void __user *ptr = *ptrp;
4119 if (put_user(cmd, (uint32_t __user *)ptr))
4120 return -EFAULT;
4121 ptr += sizeof(uint32_t);
4123 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
4124 return -EFAULT;
4125 ptr += sizeof(binder_uintptr_t);
4127 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
4128 return -EFAULT;
4129 ptr += sizeof(binder_uintptr_t);
4131 binder_stat_br(proc, thread, cmd);
4132 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
4133 proc->pid, thread->pid, cmd_name, node_debug_id,
4134 (u64)node_ptr, (u64)node_cookie);
4136 *ptrp = ptr;
4137 return 0;
4138 }
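/*
 * A BINDER_WORK_NODE entry thus appears on the read side as a 20-byte
 * record: one u32 command followed by two 64-bit binder_uintptr_t values
 * (binder_uintptr_t is __u64 on both ABIs). A hedged sketch of the
 * matching user-space decode, with illustrative names:
 *
 *	uint32_t cmd;
 *	binder_uintptr_t node_ptr, cookie;
 *	memcpy(&cmd, p, sizeof(cmd));          p += sizeof(cmd);
 *	memcpy(&node_ptr, p, sizeof(node_ptr)); p += sizeof(node_ptr);
 *	memcpy(&cookie, p, sizeof(cookie));     p += sizeof(cookie);
 *	switch (cmd) {
 *	case BR_INCREFS: case BR_ACQUIRE:
 *	case BR_RELEASE: case BR_DECREFS:
 *		adjust_local_refs(node_ptr, cookie, cmd);
 *		break;
 *	}
 */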
4140 static int binder_wait_for_work(struct binder_thread *thread,
4141 bool do_proc_work)
4142 {
4143 DEFINE_WAIT(wait);
4144 struct binder_proc *proc = thread->proc;
4145 int ret = 0;
4147 freezer_do_not_count();
4148 binder_inner_proc_lock(proc);
4149 for (;;) {
4150 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
4151 if (binder_has_work_ilocked(thread, do_proc_work))
4152 break;
4153 if (do_proc_work)
4154 list_add(&thread->waiting_thread_node,
4155 &proc->waiting_threads);
4156 binder_inner_proc_unlock(proc);
4157 schedule();
4158 binder_inner_proc_lock(proc);
4159 list_del_init(&thread->waiting_thread_node);
4160 if (signal_pending(current)) {
4161 ret = -EINTR;
4162 break;
4163 }
4164 }
4165 finish_wait(&thread->wait, &wait);
4166 binder_inner_proc_unlock(proc);
4167 freezer_count();
4169 return ret;
4170 }
4172 /**
4173 * binder_apply_fd_fixups() - finish fd translation
4174 * @proc: binder_proc associated with @t->buffer
4175 * @t: binder transaction with list of fd fixups
4177 * Now that we are in the context of the transaction target
4178 * process, we can allocate and install fds. Process the
4179 * list of fds to translate and fixup the buffer with the
4180 * new fds first and only then install the files.
4182 * If we fail to allocate an fd, skip the install and release
4183 * any fds that have already been allocated.
4184 */
4185 static int binder_apply_fd_fixups(struct binder_proc *proc,
4186 struct binder_transaction *t)
4187 {
4188 struct binder_txn_fd_fixup *fixup, *tmp;
4189 int ret = 0;
4191 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
4192 int fd = get_unused_fd_flags(O_CLOEXEC);
4194 if (fd < 0) {
4195 binder_debug(BINDER_DEBUG_TRANSACTION,
4196 "failed fd fixup txn %d fd %d\n",
4197 t->debug_id, fd);
4198 ret = -ENOMEM;
4199 goto err;
4200 }
4201 binder_debug(BINDER_DEBUG_TRANSACTION,
4202 "fd fixup txn %d fd %d\n",
4203 t->debug_id, fd);
4204 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
4205 fixup->target_fd = fd;
4206 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
4207 fixup->offset, &fd,
4208 sizeof(fd))) {
4209 ret = -EINVAL;
4210 goto err;
4211 }
4212 }
4213 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
4214 fd_install(fixup->target_fd, fixup->file);
4215 list_del(&fixup->fixup_entry);
4216 kfree(fixup);
4217 }
4219 return ret;
4221 err:
4222 binder_free_txn_fixups(t);
4223 return ret;
4224 }
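/*
 * The two-phase structure above (reserve every fd and patch the buffer
 * first, fd_install() only once all reservations succeeded) is what makes
 * the error path safe: fd_install() cannot be undone, but a reserved fd
 * can still be released with put_unused_fd(), which is what the cleanup
 * in binder_free_txn_fixups() relies on. A minimal sketch of the same
 * pattern outside binder:
 *
 *	int fds[n];
 *	for (i = 0; i < n; i++) {
 *		fds[i] = get_unused_fd_flags(O_CLOEXEC);
 *		if (fds[i] < 0)
 *			goto err;	// put_unused_fd() the ones taken
 *	}
 *	for (i = 0; i < n; i++)
 *		fd_install(fds[i], files[i]);	// point of no return
 */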
4226 static int binder_thread_read(struct binder_proc *proc,
4227 struct binder_thread *thread,
4228 binder_uintptr_t binder_buffer, size_t size,
4229 binder_size_t *consumed, int non_block)
4230 {
4231 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
4232 void __user *ptr = buffer + *consumed;
4233 void __user *end = buffer + size;
4235 int ret = 0;
4236 int wait_for_proc_work;
4238 if (*consumed == 0) {
4239 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
4240 return -EFAULT;
4241 ptr += sizeof(uint32_t);
4242 }
4244 retry:
4245 binder_inner_proc_lock(proc);
4246 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4247 binder_inner_proc_unlock(proc);
4249 thread->looper |= BINDER_LOOPER_STATE_WAITING;
4251 trace_binder_wait_for_work(wait_for_proc_work,
4252 !!thread->transaction_stack,
4253 !binder_worklist_empty(proc, &thread->todo));
4254 if (wait_for_proc_work) {
4255 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4256 BINDER_LOOPER_STATE_ENTERED))) {
4257 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
4258 proc->pid, thread->pid, thread->looper);
4259 wait_event_interruptible(binder_user_error_wait,
4260 binder_stop_on_user_error < 2);
4261 }
4262 binder_set_nice(proc->default_priority);
4263 }
4265 if (non_block) {
4266 if (!binder_has_work(thread, wait_for_proc_work))
4267 ret = -EAGAIN;
4268 } else {
4269 ret = binder_wait_for_work(thread, wait_for_proc_work);
4270 }
4272 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
4274 if (ret)
4275 return ret;
4277 while (1) {
4278 uint32_t cmd;
4279 struct binder_transaction_data_secctx tr;
4280 struct binder_transaction_data *trd = &tr.transaction_data;
4281 struct binder_work *w = NULL;
4282 struct list_head *list = NULL;
4283 struct binder_transaction *t = NULL;
4284 struct binder_thread *t_from;
4285 size_t trsize = sizeof(*trd);
4287 binder_inner_proc_lock(proc);
4288 if (!binder_worklist_empty_ilocked(&thread->todo))
4289 list = &thread->todo;
4290 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
4291 wait_for_proc_work)
4292 list = &proc->todo;
4293 else {
4294 binder_inner_proc_unlock(proc);
4296 /* no data added */
4297 if (ptr - buffer == 4 && !thread->looper_need_return)
4298 goto retry;
4299 break;
4300 }
4302 if (end - ptr < sizeof(tr) + 4) {
4303 binder_inner_proc_unlock(proc);
4304 break;
4305 }
4306 w = binder_dequeue_work_head_ilocked(list);
4307 if (binder_worklist_empty_ilocked(&thread->todo))
4308 thread->process_todo = false;
4310 switch (w->type) {
4311 case BINDER_WORK_TRANSACTION: {
4312 binder_inner_proc_unlock(proc);
4313 t = container_of(w, struct binder_transaction, work);
4314 } break;
4315 case BINDER_WORK_RETURN_ERROR: {
4316 struct binder_error *e = container_of(
4317 w, struct binder_error, work);
4319 WARN_ON(e->cmd == BR_OK);
4320 binder_inner_proc_unlock(proc);
4321 if (put_user(e->cmd, (uint32_t __user *)ptr))
4322 return -EFAULT;
4323 cmd = e->cmd;
4324 e->cmd = BR_OK;
4325 ptr += sizeof(uint32_t);
4327 binder_stat_br(proc, thread, cmd);
4328 } break;
4329 case BINDER_WORK_TRANSACTION_COMPLETE:
4330 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
4331 if (proc->oneway_spam_detection_enabled &&
4332 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
4333 cmd = BR_ONEWAY_SPAM_SUSPECT;
4334 else
4335 cmd = BR_TRANSACTION_COMPLETE;
4336 binder_inner_proc_unlock(proc);
4337 kfree(w);
4338 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4339 if (put_user(cmd, (uint32_t __user *)ptr))
4340 return -EFAULT;
4341 ptr += sizeof(uint32_t);
4343 binder_stat_br(proc, thread, cmd);
4344 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4345 "%d:%d BR_TRANSACTION_COMPLETE\n",
4346 proc->pid, thread->pid);
4347 } break;
4348 case BINDER_WORK_NODE: {
4349 struct binder_node *node = container_of(w, struct binder_node, work);
4350 int strong, weak;
4351 binder_uintptr_t node_ptr = node->ptr;
4352 binder_uintptr_t node_cookie = node->cookie;
4353 int node_debug_id = node->debug_id;
4354 int has_weak_ref;
4355 int has_strong_ref;
4356 void __user *orig_ptr = ptr;
4358 BUG_ON(proc != node->proc);
4359 strong = node->internal_strong_refs ||
4360 node->local_strong_refs;
4361 weak = !hlist_empty(&node->refs) ||
4362 node->local_weak_refs ||
4363 node->tmp_refs || strong;
4364 has_strong_ref = node->has_strong_ref;
4365 has_weak_ref = node->has_weak_ref;
4367 if (weak && !has_weak_ref) {
4368 node->has_weak_ref = 1;
4369 node->pending_weak_ref = 1;
4370 node->local_weak_refs++;
4372 if (strong && !has_strong_ref) {
4373 node->has_strong_ref = 1;
4374 node->pending_strong_ref = 1;
4375 node->local_strong_refs++;
4377 if (!strong && has_strong_ref)
4378 node->has_strong_ref = 0;
4379 if (!weak && has_weak_ref)
4380 node->has_weak_ref = 0;
4381 if (!weak && !strong) {
4382 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4383 "%d:%d node %d u%016llx c%016llx deleted\n",
4384 proc->pid, thread->pid,
4385 node_debug_id,
4386 (u64)node_ptr,
4387 (u64)node_cookie);
4388 rb_erase(&node->rb_node, &proc->nodes);
4389 binder_inner_proc_unlock(proc);
4390 binder_node_lock(node);
4392 * Acquire the node lock before freeing the
4393 * node to serialize with other threads that
4394 * may have been holding the node lock while
4395 * decrementing this node (avoids race where
4396 * this thread frees while the other thread
4397 * is unlocking the node after the final
4398 * decrement)
4399 */
4400 binder_node_unlock(node);
4401 binder_free_node(node);
4402 } else
4403 binder_inner_proc_unlock(proc);
4405 if (weak && !has_weak_ref)
4406 ret = binder_put_node_cmd(
4407 proc, thread, &ptr, node_ptr,
4408 node_cookie, node_debug_id,
4409 BR_INCREFS, "BR_INCREFS");
4410 if (!ret && strong && !has_strong_ref)
4411 ret = binder_put_node_cmd(
4412 proc, thread, &ptr, node_ptr,
4413 node_cookie, node_debug_id,
4414 BR_ACQUIRE, "BR_ACQUIRE");
4415 if (!ret && !strong && has_strong_ref)
4416 ret = binder_put_node_cmd(
4417 proc, thread, &ptr, node_ptr,
4418 node_cookie, node_debug_id,
4419 BR_RELEASE, "BR_RELEASE");
4420 if (!ret && !weak && has_weak_ref)
4421 ret = binder_put_node_cmd(
4422 proc, thread, &ptr, node_ptr,
4423 node_cookie, node_debug_id,
4424 BR_DECREFS, "BR_DECREFS");
4425 if (orig_ptr == ptr)
4426 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4427 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4428 proc->pid, thread->pid,
4429 node_debug_id,
4430 (u64)node_ptr,
4431 (u64)node_cookie);
4432 if (ret)
4433 return ret;
4434 } break;
4435 case BINDER_WORK_DEAD_BINDER:
4436 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4437 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4438 struct binder_ref_death *death;
4439 uint32_t cmd;
4440 binder_uintptr_t cookie;
4442 death = container_of(w, struct binder_ref_death, work);
4443 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4444 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4445 else
4446 cmd = BR_DEAD_BINDER;
4447 cookie = death->cookie;
4449 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4450 "%d:%d %s %016llx\n",
4451 proc->pid, thread->pid,
4452 cmd == BR_DEAD_BINDER ?
4453 "BR_DEAD_BINDER" :
4454 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4455 (u64)cookie);
4456 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4457 binder_inner_proc_unlock(proc);
4458 kfree(death);
4459 binder_stats_deleted(BINDER_STAT_DEATH);
4460 } else {
4461 binder_enqueue_work_ilocked(
4462 w, &proc->delivered_death);
4463 binder_inner_proc_unlock(proc);
4464 }
4465 if (put_user(cmd, (uint32_t __user *)ptr))
4466 return -EFAULT;
4467 ptr += sizeof(uint32_t);
4468 if (put_user(cookie,
4469 (binder_uintptr_t __user *)ptr))
4470 return -EFAULT;
4471 ptr += sizeof(binder_uintptr_t);
4472 binder_stat_br(proc, thread, cmd);
4473 if (cmd == BR_DEAD_BINDER)
4474 goto done; /* DEAD_BINDER notifications can cause transactions */
4475 } break;
4476 default:
4477 binder_inner_proc_unlock(proc);
4478 pr_err("%d:%d: bad work type %d\n",
4479 proc->pid, thread->pid, w->type);
4480 break;
4481 }
4483 if (!t)
4484 continue;
4486 BUG_ON(t->buffer == NULL);
4487 if (t->buffer->target_node) {
4488 struct binder_node *target_node = t->buffer->target_node;
4490 trd->target.ptr = target_node->ptr;
4491 trd->cookie = target_node->cookie;
4492 t->saved_priority = task_nice(current);
4493 if (t->priority < target_node->min_priority &&
4494 !(t->flags & TF_ONE_WAY))
4495 binder_set_nice(t->priority);
4496 else if (!(t->flags & TF_ONE_WAY) ||
4497 t->saved_priority > target_node->min_priority)
4498 binder_set_nice(target_node->min_priority);
4499 cmd = BR_TRANSACTION;
4500 } else {
4501 trd->target.ptr = 0;
4502 trd->cookie = 0;
4503 cmd = BR_REPLY;
4504 }
4505 trd->code = t->code;
4506 trd->flags = t->flags;
4507 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4509 t_from = binder_get_txn_from(t);
4510 if (t_from) {
4511 struct task_struct *sender = t_from->proc->tsk;
4513 trd->sender_pid =
4514 task_tgid_nr_ns(sender,
4515 task_active_pid_ns(current));
4516 } else {
4517 trd->sender_pid = 0;
4518 }
4520 ret = binder_apply_fd_fixups(proc, t);
4521 if (ret) {
4522 struct binder_buffer *buffer = t->buffer;
4523 bool oneway = !!(t->flags & TF_ONE_WAY);
4524 int tid = t->debug_id;
4526 if (t_from)
4527 binder_thread_dec_tmpref(t_from);
4528 buffer->transaction = NULL;
4529 binder_cleanup_transaction(t, "fd fixups failed",
4530 BR_FAILED_REPLY);
4531 binder_free_buf(proc, thread, buffer, true);
4532 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4533 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4534 proc->pid, thread->pid,
4535 oneway ? "async " :
4536 (cmd == BR_REPLY ? "reply " : ""),
4537 tid, BR_FAILED_REPLY, ret, __LINE__);
4538 if (cmd == BR_REPLY) {
4539 cmd = BR_FAILED_REPLY;
4540 if (put_user(cmd, (uint32_t __user *)ptr))
4541 return -EFAULT;
4542 ptr += sizeof(uint32_t);
4543 binder_stat_br(proc, thread, cmd);
4544 break;
4545 }
4546 continue;
4547 }
4548 trd->data_size = t->buffer->data_size;
4549 trd->offsets_size = t->buffer->offsets_size;
4550 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4551 trd->data.ptr.offsets = trd->data.ptr.buffer +
4552 ALIGN(t->buffer->data_size,
4553 sizeof(void *));
4555 tr.secctx = t->security_ctx;
4556 if (t->security_ctx) {
4557 cmd = BR_TRANSACTION_SEC_CTX;
4558 trsize = sizeof(tr);
4559 }
4560 if (put_user(cmd, (uint32_t __user *)ptr)) {
4561 if (t_from)
4562 binder_thread_dec_tmpref(t_from);
4564 binder_cleanup_transaction(t, "put_user failed",
4565 BR_FAILED_REPLY);
4567 return -EFAULT;
4568 }
4569 ptr += sizeof(uint32_t);
4570 if (copy_to_user(ptr, &tr, trsize)) {
4571 if (t_from)
4572 binder_thread_dec_tmpref(t_from);
4574 binder_cleanup_transaction(t, "copy_to_user failed",
4575 BR_FAILED_REPLY);
4577 return -EFAULT;
4578 }
4579 ptr += trsize;
4581 trace_binder_transaction_received(t);
4582 binder_stat_br(proc, thread, cmd);
4583 binder_debug(BINDER_DEBUG_TRANSACTION,
4584 "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n",
4585 proc->pid, thread->pid,
4586 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4587 (cmd == BR_TRANSACTION_SEC_CTX) ?
4588 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4589 t->debug_id, t_from ? t_from->proc->pid : 0,
4590 t_from ? t_from->pid : 0, cmd,
4591 t->buffer->data_size, t->buffer->offsets_size,
4592 (u64)trd->data.ptr.buffer,
4593 (u64)trd->data.ptr.offsets);
4595 if (t_from)
4596 binder_thread_dec_tmpref(t_from);
4597 t->buffer->allow_user_free = 1;
4598 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4599 binder_inner_proc_lock(thread->proc);
4600 t->to_parent = thread->transaction_stack;
4601 t->to_thread = thread;
4602 thread->transaction_stack = t;
4603 binder_inner_proc_unlock(thread->proc);
4604 } else {
4605 binder_free_transaction(t);
4606 }
4607 break;
4608 }
4610 done:
4612 *consumed = ptr - buffer;
4613 binder_inner_proc_lock(proc);
4614 if (proc->requested_threads == 0 &&
4615 list_empty(&thread->proc->waiting_threads) &&
4616 proc->requested_threads_started < proc->max_threads &&
4617 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4618 BINDER_LOOPER_STATE_ENTERED))
4619 /* the user-space code fails to spawn a new thread if we leave this out */) {
4620 proc->requested_threads++;
4621 binder_inner_proc_unlock(proc);
4622 binder_debug(BINDER_DEBUG_THREADS,
4623 "%d:%d BR_SPAWN_LOOPER\n",
4624 proc->pid, thread->pid);
4625 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4626 return -EFAULT;
4627 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4628 } else
4629 binder_inner_proc_unlock(proc);
4630 return 0;
4631 }
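/*
 * Illustrative user-space counterpart (hypothetical helper names): a
 * process registers its thread-pool size once, and each looper thread
 * reacts to the BR_SPAWN_LOOPER hint written above by starting another
 * thread that issues BC_REGISTER_LOOPER before entering its read loop:
 *
 *	uint32_t max_threads = 15;
 *	ioctl(binder_fd, BINDER_SET_MAX_THREADS, &max_threads);
 *	...
 *	case BR_SPAWN_LOOPER:
 *		pthread_create(&tid, NULL, looper_main, NULL);
 *		break;
 *
 * where looper_main() writes BC_REGISTER_LOOPER as its first command,
 * keeping proc->requested_threads_started in sync with reality.
 */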
4633 static void binder_release_work(struct binder_proc *proc,
4634 struct list_head *list)
4635 {
4636 struct binder_work *w;
4637 enum binder_work_type wtype;
4639 while (1) {
4640 binder_inner_proc_lock(proc);
4641 w = binder_dequeue_work_head_ilocked(list);
4642 wtype = w ? w->type : 0;
4643 binder_inner_proc_unlock(proc);
4644 if (!w)
4645 return;
4647 switch (wtype) {
4648 case BINDER_WORK_TRANSACTION: {
4649 struct binder_transaction *t;
4651 t = container_of(w, struct binder_transaction, work);
4653 binder_cleanup_transaction(t, "process died.",
4656 case BINDER_WORK_RETURN_ERROR: {
4657 struct binder_error *e = container_of(
4658 w, struct binder_error, work);
4660 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4661 "undelivered TRANSACTION_ERROR: %u\n",
4664 case BINDER_WORK_TRANSACTION_COMPLETE: {
4665 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4666 "undelivered TRANSACTION_COMPLETE\n");
4668 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4670 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4671 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4672 struct binder_ref_death *death;
4674 death = container_of(w, struct binder_ref_death, work);
4675 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4676 "undelivered death notification, %016llx\n",
4677 (u64)death->cookie);
4678 kfree(death);
4679 binder_stats_deleted(BINDER_STAT_DEATH);
4680 } break;
4681 case BINDER_WORK_NODE:
4682 break;
4683 default:
4684 pr_err("unexpected work type, %d, not freed\n",
4685 wtype);
4686 break;
4687 }
4688 }
4689 }
4692 static struct binder_thread *binder_get_thread_ilocked(
4693 struct binder_proc *proc, struct binder_thread *new_thread)
4694 {
4695 struct binder_thread *thread = NULL;
4696 struct rb_node *parent = NULL;
4697 struct rb_node **p = &proc->threads.rb_node;
4699 while (*p) {
4700 parent = *p;
4701 thread = rb_entry(parent, struct binder_thread, rb_node);
4703 if (current->pid < thread->pid)
4704 p = &(*p)->rb_left;
4705 else if (current->pid > thread->pid)
4706 p = &(*p)->rb_right;
4707 else
4708 return thread;
4709 }
4710 if (!new_thread)
4711 return NULL;
4712 thread = new_thread;
4713 binder_stats_created(BINDER_STAT_THREAD);
4714 thread->proc = proc;
4715 thread->pid = current->pid;
4716 atomic_set(&thread->tmp_ref, 0);
4717 init_waitqueue_head(&thread->wait);
4718 INIT_LIST_HEAD(&thread->todo);
4719 rb_link_node(&thread->rb_node, parent, p);
4720 rb_insert_color(&thread->rb_node, &proc->threads);
4721 thread->looper_need_return = true;
4722 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4723 thread->return_error.cmd = BR_OK;
4724 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4725 thread->reply_error.cmd = BR_OK;
4726 thread->ee.command = BR_OK;
4727 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4728 return thread;
4729 }
4731 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4733 struct binder_thread *thread;
4734 struct binder_thread *new_thread;
4736 binder_inner_proc_lock(proc);
4737 thread = binder_get_thread_ilocked(proc, NULL);
4738 binder_inner_proc_unlock(proc);
4739 if (!thread) {
4740 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4741 if (new_thread == NULL)
4742 return NULL;
4743 binder_inner_proc_lock(proc);
4744 thread = binder_get_thread_ilocked(proc, new_thread);
4745 binder_inner_proc_unlock(proc);
4746 if (thread != new_thread)
4747 kfree(new_thread);
4748 }
4749 return thread;
4750 }
4752 static void binder_free_proc(struct binder_proc *proc)
4753 {
4754 struct binder_device *device;
4756 BUG_ON(!list_empty(&proc->todo));
4757 BUG_ON(!list_empty(&proc->delivered_death));
4758 if (proc->outstanding_txns)
4759 pr_warn("%s: Unexpected outstanding_txns %d\n",
4760 __func__, proc->outstanding_txns);
4761 device = container_of(proc->context, struct binder_device, context);
4762 if (refcount_dec_and_test(&device->ref)) {
4763 kfree(proc->context->name);
4764 kfree(device);
4765 }
4766 binder_alloc_deferred_release(&proc->alloc);
4767 put_task_struct(proc->tsk);
4768 put_cred(proc->cred);
4769 binder_stats_deleted(BINDER_STAT_PROC);
4770 kfree(proc);
4771 }
4773 static void binder_free_thread(struct binder_thread *thread)
4774 {
4775 BUG_ON(!list_empty(&thread->todo));
4776 binder_stats_deleted(BINDER_STAT_THREAD);
4777 binder_proc_dec_tmpref(thread->proc);
4778 kfree(thread);
4779 }
4781 static int binder_thread_release(struct binder_proc *proc,
4782 struct binder_thread *thread)
4783 {
4784 struct binder_transaction *t;
4785 struct binder_transaction *send_reply = NULL;
4786 int active_transactions = 0;
4787 struct binder_transaction *last_t = NULL;
4789 binder_inner_proc_lock(thread->proc);
4790 /*
4791 * take a ref on the proc so it survives
4792 * after we remove this thread from proc->threads.
4793 * The corresponding dec is when we actually
4794 * free the thread in binder_free_thread()
4795 */
4796 proc->tmp_ref++;
4797 /*
4798 * take a ref on this thread to ensure it
4799 * survives while we are releasing it
4800 */
4801 atomic_inc(&thread->tmp_ref);
4802 rb_erase(&thread->rb_node, &proc->threads);
4803 t = thread->transaction_stack;
4804 if (t) {
4805 spin_lock(&t->lock);
4806 if (t->to_thread == thread)
4807 send_reply = t;
4808 } else {
4809 __acquire(&t->lock);
4810 }
4811 thread->is_dead = true;
4813 while (t) {
4814 last_t = t;
4815 active_transactions++;
4816 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4817 "release %d:%d transaction %d %s, still active\n",
4818 proc->pid, thread->pid,
4819 t->debug_id,
4820 (t->to_thread == thread) ? "in" : "out");
4822 if (t->to_thread == thread) {
4823 thread->proc->outstanding_txns--;
4824 t->to_proc = NULL;
4825 t->to_thread = NULL;
4826 if (t->buffer) {
4827 t->buffer->transaction = NULL;
4828 t->buffer = NULL;
4829 }
4830 t = t->to_parent;
4831 } else if (t->from == thread) {
4832 t->from = NULL;
4833 t = t->from_parent;
4834 } else
4835 BUG();
4836 spin_unlock(&last_t->lock);
4837 if (t)
4838 spin_lock(&t->lock);
4839 else
4840 __acquire(&t->lock);
4841 }
4842 /* annotation for sparse, lock not acquired in last iteration above */
4843 __release(&t->lock);
4845 /*
4846 * If this thread used poll, make sure we remove the waitqueue from any
4847 * poll data structures holding it.
4848 */
4849 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4850 wake_up_pollfree(&thread->wait);
4852 binder_inner_proc_unlock(thread->proc);
4854 /*
4855 * This is needed to avoid races between wake_up_pollfree() above and
4856 * someone else removing the last entry from the queue for other reasons
4857 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4858 * descriptor being closed). Such other users hold an RCU read lock, so
4859 * we can be sure they're done after we call synchronize_rcu().
4860 */
4861 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4862 synchronize_rcu();
4864 if (send_reply)
4865 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4866 binder_release_work(proc, &thread->todo);
4867 binder_thread_dec_tmpref(thread);
4868 return active_transactions;
4869 }
4871 static __poll_t binder_poll(struct file *filp,
4872 struct poll_table_struct *wait)
4873 {
4874 struct binder_proc *proc = filp->private_data;
4875 struct binder_thread *thread = NULL;
4876 bool wait_for_proc_work;
4878 thread = binder_get_thread(proc);
4879 if (!thread)
4880 return EPOLLERR;
4882 binder_inner_proc_lock(thread->proc);
4883 thread->looper |= BINDER_LOOPER_STATE_POLL;
4884 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4886 binder_inner_proc_unlock(thread->proc);
4888 poll_wait(filp, &thread->wait, wait);
4890 if (binder_has_work(thread, wait_for_proc_work))
4891 return EPOLLIN;
4893 return 0;
4894 }
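/*
 * Sketch of the user-space view (illustrative only): a binder fd can be
 * multiplexed with epoll, and EPOLLIN here means a binder_thread_read()
 * for this thread would not block:
 *
 *	struct epoll_event ev = { .events = EPOLLIN };
 *	epoll_ctl(epfd, EPOLL_CTL_ADD, binder_fd, &ev);
 *	epoll_wait(epfd, &ev, 1, -1);
 *	// now issue BINDER_WRITE_READ with read_size > 0
 *
 * Note the wake_up_pollfree() handling in binder_thread_release() above,
 * which exists precisely because these epoll entries can outlive the
 * binder thread.
 */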
4896 static int binder_ioctl_write_read(struct file *filp,
4897 unsigned int cmd, unsigned long arg,
4898 struct binder_thread *thread)
4899 {
4900 int ret = 0;
4901 struct binder_proc *proc = filp->private_data;
4902 unsigned int size = _IOC_SIZE(cmd);
4903 void __user *ubuf = (void __user *)arg;
4904 struct binder_write_read bwr;
4906 if (size != sizeof(struct binder_write_read)) {
4907 ret = -EINVAL;
4908 goto out;
4909 }
4910 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4911 ret = -EFAULT;
4912 goto out;
4913 }
4914 binder_debug(BINDER_DEBUG_READ_WRITE,
4915 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4916 proc->pid, thread->pid,
4917 (u64)bwr.write_size, (u64)bwr.write_buffer,
4918 (u64)bwr.read_size, (u64)bwr.read_buffer);
4920 if (bwr.write_size > 0) {
4921 ret = binder_thread_write(proc, thread,
4922 bwr.write_buffer,
4923 bwr.write_size,
4924 &bwr.write_consumed);
4925 trace_binder_write_done(ret);
4926 if (ret < 0) {
4927 bwr.read_consumed = 0;
4928 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4929 ret = -EFAULT;
4930 goto out;
4931 }
4932 }
4933 if (bwr.read_size > 0) {
4934 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4935 bwr.read_size,
4936 &bwr.read_consumed,
4937 filp->f_flags & O_NONBLOCK);
4938 trace_binder_read_done(ret);
4939 binder_inner_proc_lock(proc);
4940 if (!binder_worklist_empty_ilocked(&proc->todo))
4941 binder_wakeup_proc_ilocked(proc);
4942 binder_inner_proc_unlock(proc);
4943 if (ret < 0) {
4944 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4945 ret = -EFAULT;
4946 goto out;
4947 }
4948 }
4949 binder_debug(BINDER_DEBUG_READ_WRITE,
4950 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4951 proc->pid, thread->pid,
4952 (u64)bwr.write_consumed, (u64)bwr.write_size,
4953 (u64)bwr.read_consumed, (u64)bwr.read_size);
4954 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4955 ret = -EFAULT;
4956 goto out;
4957 }
4958 out:
4959 return ret;
4960 }
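/*
 * Minimal user-space sketch of driving this ioctl (hedged; real clients
 * use libbinder). A single BINDER_WRITE_READ can both submit commands and
 * poll for returns; the kernel updates write_consumed/read_consumed so
 * the caller can tell how much of each buffer was processed:
 *
 *	uint32_t cmd = BC_ENTER_LOOPER;
 *	uint32_t readbuf[32];
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(cmd),
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.read_size = sizeof(readbuf),
 *		.read_buffer = (binder_uintptr_t)readbuf,
 *	};
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) < 0)
 *		return -1;
 *	// parse bwr.read_consumed bytes of BR_* records in readbuf
 */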
4962 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4963 struct flat_binder_object *fbo)
4964 {
4965 int ret = 0;
4966 struct binder_proc *proc = filp->private_data;
4967 struct binder_context *context = proc->context;
4968 struct binder_node *new_node;
4969 kuid_t curr_euid = current_euid();
4971 mutex_lock(&context->context_mgr_node_lock);
4972 if (context->binder_context_mgr_node) {
4973 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4977 ret = security_binder_set_context_mgr(proc->cred);
4980 if (uid_valid(context->binder_context_mgr_uid)) {
4981 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4982 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4983 from_kuid(&init_user_ns, curr_euid),
4984 from_kuid(&init_user_ns,
4985 context->binder_context_mgr_uid));
4990 context->binder_context_mgr_uid = curr_euid;
4992 new_node = binder_new_node(proc, fbo);
4997 binder_node_lock(new_node);
4998 new_node->local_weak_refs++;
4999 new_node->local_strong_refs++;
5000 new_node->has_strong_ref = 1;
5001 new_node->has_weak_ref = 1;
5002 context->binder_context_mgr_node = new_node;
5003 binder_node_unlock(new_node);
5004 binder_put_node(new_node);
5005 out:
5006 mutex_unlock(&context->context_mgr_node_lock);
5007 return ret;
5008 }
5010 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
5011 struct binder_node_info_for_ref *info)
5012 {
5013 struct binder_node *node;
5014 struct binder_context *context = proc->context;
5015 __u32 handle = info->handle;
5017 if (info->strong_count || info->weak_count || info->reserved1 ||
5018 info->reserved2 || info->reserved3) {
5019 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
5024 /* This ioctl may only be used by the context manager */
5025 mutex_lock(&context->context_mgr_node_lock);
5026 if (!context->binder_context_mgr_node ||
5027 context->binder_context_mgr_node->proc != proc) {
5028 mutex_unlock(&context->context_mgr_node_lock);
5029 return -EPERM;
5030 }
5031 mutex_unlock(&context->context_mgr_node_lock);
5033 node = binder_get_node_from_ref(proc, handle, true, NULL);
5034 if (!node)
5035 return -EINVAL;
5037 info->strong_count = node->local_strong_refs +
5038 node->internal_strong_refs;
5039 info->weak_count = node->local_weak_refs;
5041 binder_put_node(node);
5043 return 0;
5044 }
5046 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
5047 struct binder_node_debug_info *info)
5048 {
5049 struct rb_node *n;
5050 binder_uintptr_t ptr = info->ptr;
5052 memset(info, 0, sizeof(*info));
5054 binder_inner_proc_lock(proc);
5055 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5056 struct binder_node *node = rb_entry(n, struct binder_node,
5057 rb_node);
5058 if (node->ptr > ptr) {
5059 info->ptr = node->ptr;
5060 info->cookie = node->cookie;
5061 info->has_strong_ref = node->has_strong_ref;
5062 info->has_weak_ref = node->has_weak_ref;
5063 break;
5064 }
5065 }
5066 binder_inner_proc_unlock(proc);
5068 return 0;
5069 }
5071 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
5072 {
5073 struct rb_node *n;
5074 struct binder_thread *thread;
5076 if (proc->outstanding_txns > 0)
5077 return true;
5079 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
5080 thread = rb_entry(n, struct binder_thread, rb_node);
5081 if (thread->transaction_stack)
5082 return true;
5083 }
5084 return false;
5085 }
5087 static int binder_ioctl_freeze(struct binder_freeze_info *info,
5088 struct binder_proc *target_proc)
5089 {
5090 int ret = 0;
5092 if (!info->enable) {
5093 binder_inner_proc_lock(target_proc);
5094 target_proc->sync_recv = false;
5095 target_proc->async_recv = false;
5096 target_proc->is_frozen = false;
5097 binder_inner_proc_unlock(target_proc);
5098 return 0;
5099 }
5101 /*
5102 * Freezing the target. Prevent new transactions by
5103 * setting frozen state. If timeout specified, wait
5104 * for transactions to drain.
5105 */
5106 binder_inner_proc_lock(target_proc);
5107 target_proc->sync_recv = false;
5108 target_proc->async_recv = false;
5109 target_proc->is_frozen = true;
5110 binder_inner_proc_unlock(target_proc);
5112 if (info->timeout_ms > 0)
5113 ret = wait_event_interruptible_timeout(
5114 target_proc->freeze_wait,
5115 (!target_proc->outstanding_txns),
5116 msecs_to_jiffies(info->timeout_ms));
5118 /* Also check pending transactions that wait for reply */
5119 if (ret >= 0) {
5120 binder_inner_proc_lock(target_proc);
5121 if (binder_txns_pending_ilocked(target_proc))
5122 ret = -EAGAIN;
5123 binder_inner_proc_unlock(target_proc);
5124 }
5126 if (ret < 0) {
5127 binder_inner_proc_lock(target_proc);
5128 target_proc->is_frozen = false;
5129 binder_inner_proc_unlock(target_proc);
5130 }
5132 return ret;
5133 }
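/*
 * Illustrative caller-side sketch (e.g. a freezer daemon; values are
 * hypothetical). EAGAIN from the ioctl means transactions were still
 * pending when the timeout expired, and the freeze was rolled back:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		retry_later();	// or thaw again with .enable = 0
 */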
5135 static int binder_ioctl_get_freezer_info(
5136 struct binder_frozen_status_info *info)
5137 {
5138 struct binder_proc *target_proc;
5139 bool found = false;
5140 __u32 txns_pending;
5142 info->sync_recv = 0;
5143 info->async_recv = 0;
5145 mutex_lock(&binder_procs_lock);
5146 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5147 if (target_proc->pid == info->pid) {
5148 found = true;
5149 binder_inner_proc_lock(target_proc);
5150 txns_pending = binder_txns_pending_ilocked(target_proc);
5151 info->sync_recv |= target_proc->sync_recv |
5152 (txns_pending << 1);
5153 info->async_recv |= target_proc->async_recv;
5154 binder_inner_proc_unlock(target_proc);
5155 }
5156 }
5157 mutex_unlock(&binder_procs_lock);
5159 if (!found)
5160 return -EINVAL;
5162 return 0;
5163 }
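/*
 * On return, info->sync_recv packs two flags: bit 0 reports that a sync
 * transaction was rejected while frozen, bit 1 (txns_pending << 1 above)
 * reports in-flight transactions. A sketch of the user-space decode
 * (illustrative):
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *	ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info);
 *	bool blocked_sync  = info.sync_recv & 1;
 *	bool txns_pending  = info.sync_recv & 2;
 *	bool blocked_async = info.async_recv & 1;
 */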
5165 static int binder_ioctl_get_extended_error(struct binder_thread *thread,
5166 void __user *ubuf)
5167 {
5168 struct binder_extended_error ee;
5170 binder_inner_proc_lock(thread->proc);
5171 ee = thread->ee;
5172 binder_set_extended_error(&thread->ee, 0, BR_OK, 0);
5173 binder_inner_proc_unlock(thread->proc);
5175 if (copy_to_user(ubuf, &ee, sizeof(ee)))
5176 return -EFAULT;
5178 return 0;
5179 }
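/*
 * Sketch of the intended calling pattern (illustrative): after an ioctl
 * on the same thread fails, user space can ask why before issuing the
 * next command, since the saved state is cleared on read:
 *
 *	struct binder_extended_error ee;
 *	if (ioctl(binder_fd, BINDER_GET_EXTENDED_ERROR, &ee) == 0)
 *		fprintf(stderr, "txn %u failed: cmd %u param %d\n",
 *			ee.id, ee.command, ee.param);
 */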
5181 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
5182 {
5183 int ret;
5184 struct binder_proc *proc = filp->private_data;
5185 struct binder_thread *thread;
5186 unsigned int size = _IOC_SIZE(cmd);
5187 void __user *ubuf = (void __user *)arg;
5189 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
5190 proc->pid, current->pid, cmd, arg);*/
5192 binder_selftest_alloc(&proc->alloc);
5194 trace_binder_ioctl(cmd, arg);
5196 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5197 if (ret)
5198 goto err_unlocked;
5200 thread = binder_get_thread(proc);
5201 if (thread == NULL) {
5202 ret = -ENOMEM;
5203 goto err;
5204 }
5206 switch (cmd) {
5207 case BINDER_WRITE_READ:
5208 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
5209 if (ret)
5210 goto err;
5211 break;
5212 case BINDER_SET_MAX_THREADS: {
5213 int max_threads;
5215 if (copy_from_user(&max_threads, ubuf,
5216 sizeof(max_threads))) {
5217 ret = -EINVAL;
5218 goto err;
5219 }
5220 binder_inner_proc_lock(proc);
5221 proc->max_threads = max_threads;
5222 binder_inner_proc_unlock(proc);
5223 break;
5224 }
5225 case BINDER_SET_CONTEXT_MGR_EXT: {
5226 struct flat_binder_object fbo;
5228 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
5229 ret = -EINVAL;
5230 goto err;
5231 }
5232 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
5233 if (ret)
5234 goto err;
5235 break;
5236 }
5237 case BINDER_SET_CONTEXT_MGR:
5238 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
5239 if (ret)
5240 goto err;
5241 break;
5242 case BINDER_THREAD_EXIT:
5243 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
5244 proc->pid, thread->pid);
5245 binder_thread_release(proc, thread);
5246 thread = NULL;
5247 break;
5248 case BINDER_VERSION: {
5249 struct binder_version __user *ver = ubuf;
5251 if (size != sizeof(struct binder_version)) {
5252 ret = -EINVAL;
5253 goto err;
5254 }
5255 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
5256 &ver->protocol_version)) {
5257 ret = -EINVAL;
5258 goto err;
5259 }
5260 break;
5261 }
5262 case BINDER_GET_NODE_INFO_FOR_REF: {
5263 struct binder_node_info_for_ref info;
5265 if (copy_from_user(&info, ubuf, sizeof(info))) {
5266 ret = -EFAULT;
5267 goto err;
5268 }
5270 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
5271 if (ret < 0)
5272 goto err;
5274 if (copy_to_user(ubuf, &info, sizeof(info))) {
5275 ret = -EFAULT;
5276 goto err;
5277 }
5278 break;
5279 }
5281 case BINDER_GET_NODE_DEBUG_INFO: {
5282 struct binder_node_debug_info info;
5284 if (copy_from_user(&info, ubuf, sizeof(info))) {
5285 ret = -EFAULT;
5286 goto err;
5287 }
5289 ret = binder_ioctl_get_node_debug_info(proc, &info);
5290 if (ret < 0)
5291 goto err;
5293 if (copy_to_user(ubuf, &info, sizeof(info))) {
5294 ret = -EFAULT;
5295 goto err;
5296 }
5297 break;
5298 }
5299 case BINDER_FREEZE: {
5300 struct binder_freeze_info info;
5301 struct binder_proc **target_procs = NULL, *target_proc;
5302 int target_procs_count = 0, i = 0;
5304 ret = 0;
5306 if (copy_from_user(&info, ubuf, sizeof(info))) {
5307 ret = -EFAULT;
5308 goto err;
5309 }
5311 mutex_lock(&binder_procs_lock);
5312 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5313 if (target_proc->pid == info.pid)
5314 target_procs_count++;
5315 }
5317 if (target_procs_count == 0) {
5318 mutex_unlock(&binder_procs_lock);
5319 ret = -EINVAL;
5320 goto err;
5321 }
5323 target_procs = kcalloc(target_procs_count,
5324 sizeof(struct binder_proc *),
5325 GFP_KERNEL);
5327 if (!target_procs) {
5328 mutex_unlock(&binder_procs_lock);
5329 ret = -ENOMEM;
5330 goto err;
5331 }
5333 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
5334 if (target_proc->pid != info.pid)
5335 continue;
5337 binder_inner_proc_lock(target_proc);
5338 target_proc->tmp_ref++;
5339 binder_inner_proc_unlock(target_proc);
5341 target_procs[i++] = target_proc;
5342 }
5343 mutex_unlock(&binder_procs_lock);
5345 for (i = 0; i < target_procs_count; i++) {
5346 if (ret >= 0)
5347 ret = binder_ioctl_freeze(&info,
5348 target_procs[i]);
5350 binder_proc_dec_tmpref(target_procs[i]);
5351 }
5353 kfree(target_procs);
5355 if (ret < 0)
5356 goto err;
5357 break;
5358 }
5359 case BINDER_GET_FROZEN_INFO: {
5360 struct binder_frozen_status_info info;
5362 if (copy_from_user(&info, ubuf, sizeof(info))) {
5363 ret = -EFAULT;
5364 goto err;
5365 }
5367 ret = binder_ioctl_get_freezer_info(&info);
5368 if (ret < 0)
5369 goto err;
5371 if (copy_to_user(ubuf, &info, sizeof(info))) {
5372 ret = -EFAULT;
5373 goto err;
5374 }
5375 break;
5376 }
5377 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5378 uint32_t enable;
5380 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5381 ret = -EFAULT;
5382 goto err;
5383 }
5384 binder_inner_proc_lock(proc);
5385 proc->oneway_spam_detection_enabled = (bool)enable;
5386 binder_inner_proc_unlock(proc);
5387 break;
5388 }
5389 case BINDER_GET_EXTENDED_ERROR:
5390 ret = binder_ioctl_get_extended_error(thread, ubuf);
5391 if (ret < 0)
5392 goto err;
5393 break;
5394 default:
5395 ret = -EINVAL;
5396 goto err;
5397 }
5398 ret = 0;
5399 err:
5400 if (thread)
5401 thread->looper_need_return = false;
5402 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5403 if (ret && ret != -EINTR)
5404 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5405 err_unlocked:
5406 trace_binder_ioctl_done(ret);
5407 return ret;
5408 }
5410 static void binder_vma_open(struct vm_area_struct *vma)
5411 {
5412 struct binder_proc *proc = vma->vm_private_data;
5414 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5415 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5416 proc->pid, vma->vm_start, vma->vm_end,
5417 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5418 (unsigned long)pgprot_val(vma->vm_page_prot));
5419 }
5421 static void binder_vma_close(struct vm_area_struct *vma)
5422 {
5423 struct binder_proc *proc = vma->vm_private_data;
5425 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5426 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5427 proc->pid, vma->vm_start, vma->vm_end,
5428 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5429 (unsigned long)pgprot_val(vma->vm_page_prot));
5430 binder_alloc_vma_close(&proc->alloc);
5431 }
5433 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5434 {
5435 return VM_FAULT_SIGBUS;
5436 }
5438 static const struct vm_operations_struct binder_vm_ops = {
5439 .open = binder_vma_open,
5440 .close = binder_vma_close,
5441 .fault = binder_vm_fault,
5442 };
5444 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5445 {
5446 struct binder_proc *proc = filp->private_data;
5448 if (proc->tsk != current->group_leader)
5449 return -EINVAL;
5451 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5452 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5453 __func__, proc->pid, vma->vm_start, vma->vm_end,
5454 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5455 (unsigned long)pgprot_val(vma->vm_page_prot));
5457 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5458 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5459 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5462 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5463 vma->vm_flags &= ~VM_MAYWRITE;
5465 vma->vm_ops = &binder_vm_ops;
5466 vma->vm_private_data = proc;
5468 return binder_alloc_mmap_handler(&proc->alloc, vma);
5469 }
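/*
 * Hedged user-space sketch of the open/version/mmap handshake this
 * function completes (the mapping size is illustrative; libbinder uses
 * about 1MB minus two pages). The mapping must be read-only: VM_WRITE is
 * rejected via FORBIDDEN_MMAP_FLAGS above, and VM_MAYWRITE is cleared so
 * write access cannot be re-added later with mprotect():
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	struct binder_version vers;
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 */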
5471 static int binder_open(struct inode *nodp, struct file *filp)
5472 {
5473 struct binder_proc *proc, *itr;
5474 struct binder_device *binder_dev;
5475 struct binderfs_info *info;
5476 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5477 bool existing_pid = false;
5479 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5480 current->group_leader->pid, current->pid);
5482 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5483 if (proc == NULL)
5484 return -ENOMEM;
5485 spin_lock_init(&proc->inner_lock);
5486 spin_lock_init(&proc->outer_lock);
5487 get_task_struct(current->group_leader);
5488 proc->tsk = current->group_leader;
5489 proc->cred = get_cred(filp->f_cred);
5490 INIT_LIST_HEAD(&proc->todo);
5491 init_waitqueue_head(&proc->freeze_wait);
5492 proc->default_priority = task_nice(current);
5493 /* binderfs stashes devices in i_private */
5494 if (is_binderfs_device(nodp)) {
5495 binder_dev = nodp->i_private;
5496 info = nodp->i_sb->s_fs_info;
5497 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5498 } else {
5499 binder_dev = container_of(filp->private_data,
5500 struct binder_device, miscdev);
5501 }
5502 refcount_inc(&binder_dev->ref);
5503 proc->context = &binder_dev->context;
5504 binder_alloc_init(&proc->alloc);
5506 binder_stats_created(BINDER_STAT_PROC);
5507 proc->pid = current->group_leader->pid;
5508 INIT_LIST_HEAD(&proc->delivered_death);
5509 INIT_LIST_HEAD(&proc->waiting_threads);
5510 filp->private_data = proc;
5512 mutex_lock(&binder_procs_lock);
5513 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5514 if (itr->pid == proc->pid) {
5515 existing_pid = true;
5516 break;
5517 }
5518 }
5519 hlist_add_head(&proc->proc_node, &binder_procs);
5520 mutex_unlock(&binder_procs_lock);
5522 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5523 char strbuf[11];
5525 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5526 /*
5527 * proc debug entries are shared between contexts.
5528 * Only create for the first PID to avoid debugfs log spamming
5529 * The printing code will anyway print all contexts for a given
5530 * PID so this is not a problem.
5531 */
5532 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5533 binder_debugfs_dir_entry_proc,
5534 (void *)(unsigned long)proc->pid,
5535 &proc_fops);
5536 }
5538 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5539 char strbuf[11];
5540 struct dentry *binderfs_entry;
5542 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5544 * Similar to debugfs, the process specific log file is shared
5545 * between contexts. Only create for the first PID.
5546 * This is ok since same as debugfs, the log file will contain
5547 * information on all contexts of a given PID.
5548 */
5549 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5550 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5551 if (!IS_ERR(binderfs_entry)) {
5552 proc->binderfs_entry = binderfs_entry;
5553 } else {
5554 int error;
5556 error = PTR_ERR(binderfs_entry);
5557 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5558 strbuf, error);
5559 }
5560 }
5562 return 0;
5563 }
5565 static int binder_flush(struct file *filp, fl_owner_t id)
5566 {
5567 struct binder_proc *proc = filp->private_data;
5569 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5571 return 0;
5572 }
5574 static void binder_deferred_flush(struct binder_proc *proc)
5575 {
5576 struct rb_node *n;
5577 int wake_count = 0;
5579 binder_inner_proc_lock(proc);
5580 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5581 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5583 thread->looper_need_return = true;
5584 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5585 wake_up_interruptible(&thread->wait);
5586 wake_count++;
5587 }
5588 }
5589 binder_inner_proc_unlock(proc);
5591 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5592 "binder_flush: %d woke %d threads\n", proc->pid,
5596 static int binder_release(struct inode *nodp, struct file *filp)
5597 {
5598 struct binder_proc *proc = filp->private_data;
5600 debugfs_remove(proc->debugfs_entry);
5602 if (proc->binderfs_entry) {
5603 binderfs_remove_file(proc->binderfs_entry);
5604 proc->binderfs_entry = NULL;
5605 }
5607 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5609 return 0;
5610 }
5612 static int binder_node_release(struct binder_node *node, int refs)
5613 {
5614 struct binder_ref *ref;
5615 int death = 0;
5616 struct binder_proc *proc = node->proc;
5618 binder_release_work(proc, &node->async_todo);
5620 binder_node_lock(node);
5621 binder_inner_proc_lock(proc);
5622 binder_dequeue_work_ilocked(&node->work);
5623 /*
5624 * The caller must have taken a temporary ref on the node.
5625 */
5626 BUG_ON(!node->tmp_refs);
5627 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5628 binder_inner_proc_unlock(proc);
5629 binder_node_unlock(node);
5630 binder_free_node(node);
5632 return refs;
5633 }
5635 node->proc = NULL;
5636 node->local_strong_refs = 0;
5637 node->local_weak_refs = 0;
5638 binder_inner_proc_unlock(proc);
5640 spin_lock(&binder_dead_nodes_lock);
5641 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5642 spin_unlock(&binder_dead_nodes_lock);
5644 hlist_for_each_entry(ref, &node->refs, node_entry) {
5645 refs++;
5646 /*
5647 * Need the node lock to synchronize
5648 * with new notification requests and the
5649 * inner lock to synchronize with queued
5650 * death notifications.
5651 */
5652 binder_inner_proc_lock(ref->proc);
5653 if (!ref->death) {
5654 binder_inner_proc_unlock(ref->proc);
5655 continue;
5656 }
5658 death++;
5660 BUG_ON(!list_empty(&ref->death->work.entry));
5661 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5662 binder_enqueue_work_ilocked(&ref->death->work,
5663 &ref->proc->todo);
5664 binder_wakeup_proc_ilocked(ref->proc);
5665 binder_inner_proc_unlock(ref->proc);
5666 }
5668 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5669 "node %d now dead, refs %d, death %d\n",
5670 node->debug_id, refs, death);
5671 binder_node_unlock(node);
5672 binder_put_node(node);
5674 return refs;
5675 }
5677 static void binder_deferred_release(struct binder_proc *proc)
5678 {
5679 struct binder_context *context = proc->context;
5680 struct rb_node *n;
5681 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5683 mutex_lock(&binder_procs_lock);
5684 hlist_del(&proc->proc_node);
5685 mutex_unlock(&binder_procs_lock);
5687 mutex_lock(&context->context_mgr_node_lock);
5688 if (context->binder_context_mgr_node &&
5689 context->binder_context_mgr_node->proc == proc) {
5690 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5691 "%s: %d context_mgr_node gone\n",
5692 __func__, proc->pid);
5693 context->binder_context_mgr_node = NULL;
5695 mutex_unlock(&context->context_mgr_node_lock);
5696 binder_inner_proc_lock(proc);
5697 /*
5698 * Make sure proc stays alive after we
5699 * remove all the threads
5700 */
5701 proc->tmp_ref++;
5703 proc->is_dead = true;
5704 proc->is_frozen = false;
5705 proc->sync_recv = false;
5706 proc->async_recv = false;
5707 threads = 0;
5708 active_transactions = 0;
5709 while ((n = rb_first(&proc->threads))) {
5710 struct binder_thread *thread;
5712 thread = rb_entry(n, struct binder_thread, rb_node);
5713 binder_inner_proc_unlock(proc);
5714 threads++;
5715 active_transactions += binder_thread_release(proc, thread);
5716 binder_inner_proc_lock(proc);
5717 }
5719 nodes = 0;
5720 incoming_refs = 0;
5721 while ((n = rb_first(&proc->nodes))) {
5722 struct binder_node *node;
5724 node = rb_entry(n, struct binder_node, rb_node);
5725 nodes++;
5726 /*
5727 * take a temporary ref on the node before
5728 * calling binder_node_release() which will either
5729 * kfree() the node or call binder_put_node()
5730 */
5731 binder_inc_node_tmpref_ilocked(node);
5732 rb_erase(&node->rb_node, &proc->nodes);
5733 binder_inner_proc_unlock(proc);
5734 incoming_refs = binder_node_release(node, incoming_refs);
5735 binder_inner_proc_lock(proc);
5736 }
5737 binder_inner_proc_unlock(proc);
5739 outgoing_refs = 0;
5740 binder_proc_lock(proc);
5741 while ((n = rb_first(&proc->refs_by_desc))) {
5742 struct binder_ref *ref;
5744 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5745 outgoing_refs++;
5746 binder_cleanup_ref_olocked(ref);
5747 binder_proc_unlock(proc);
5748 binder_free_ref(ref);
5749 binder_proc_lock(proc);
5750 }
5751 binder_proc_unlock(proc);
5753 binder_release_work(proc, &proc->todo);
5754 binder_release_work(proc, &proc->delivered_death);
5756 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5757 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5758 __func__, proc->pid, threads, nodes, incoming_refs,
5759 outgoing_refs, active_transactions);
5761 binder_proc_dec_tmpref(proc);
5762 }
5764 static void binder_deferred_func(struct work_struct *work)
5765 {
5766 struct binder_proc *proc;
5768 int defer;
5770 do {
5771 mutex_lock(&binder_deferred_lock);
5772 if (!hlist_empty(&binder_deferred_list)) {
5773 proc = hlist_entry(binder_deferred_list.first,
5774 struct binder_proc, deferred_work_node);
5775 hlist_del_init(&proc->deferred_work_node);
5776 defer = proc->deferred_work;
5777 proc->deferred_work = 0;
5778 } else {
5779 proc = NULL;
5780 defer = 0;
5781 }
5782 mutex_unlock(&binder_deferred_lock);
5784 if (defer & BINDER_DEFERRED_FLUSH)
5785 binder_deferred_flush(proc);
5787 if (defer & BINDER_DEFERRED_RELEASE)
5788 binder_deferred_release(proc); /* frees proc */
5789 } while (proc);
5790 }
5791 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5793 static void
5794 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5795 {
5796 mutex_lock(&binder_deferred_lock);
5797 proc->deferred_work |= defer;
5798 if (hlist_unhashed(&proc->deferred_work_node)) {
5799 hlist_add_head(&proc->deferred_work_node,
5800 &binder_deferred_list);
5801 schedule_work(&binder_deferred_work);
5803 mutex_unlock(&binder_deferred_lock);
5804 }
5806 static void print_binder_transaction_ilocked(struct seq_file *m,
5807 struct binder_proc *proc,
5808 const char *prefix,
5809 struct binder_transaction *t)
5810 {
5811 struct binder_proc *to_proc;
5812 struct binder_buffer *buffer = t->buffer;
5814 spin_lock(&t->lock);
5815 to_proc = t->to_proc;
5816 seq_printf(m,
5817 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5818 prefix, t->debug_id, t,
5819 t->from ? t->from->proc->pid : 0,
5820 t->from ? t->from->pid : 0,
5821 to_proc ? to_proc->pid : 0,
5822 t->to_thread ? t->to_thread->pid : 0,
5823 t->code, t->flags, t->priority, t->need_reply);
5824 spin_unlock(&t->lock);
5826 if (proc != to_proc) {
5827 /*
5828 * Can only safely deref buffer if we are holding the
5829 * correct proc inner lock for this node
5830 */
5831 seq_puts(m, "\n");
5832 return;
5833 }
5835 if (buffer == NULL) {
5836 seq_puts(m, " buffer free\n");
5839 if (buffer->target_node)
5840 seq_printf(m, " node %d", buffer->target_node->debug_id);
5841 seq_printf(m, " size %zd:%zd data %pK\n",
5842 buffer->data_size, buffer->offsets_size,
5843 buffer->user_data);
5844 }
5846 static void print_binder_work_ilocked(struct seq_file *m,
5847 struct binder_proc *proc,
5848 const char *prefix,
5849 const char *transaction_prefix,
5850 struct binder_work *w)
5851 {
5852 struct binder_node *node;
5853 struct binder_transaction *t;
5855 switch (w->type) {
5856 case BINDER_WORK_TRANSACTION:
5857 t = container_of(w, struct binder_transaction, work);
5858 print_binder_transaction_ilocked(
5859 m, proc, transaction_prefix, t);
5860 break;
5861 case BINDER_WORK_RETURN_ERROR: {
5862 struct binder_error *e = container_of(
5863 w, struct binder_error, work);
5865 seq_printf(m, "%stransaction error: %u\n",
5868 case BINDER_WORK_TRANSACTION_COMPLETE:
5869 seq_printf(m, "%stransaction complete\n", prefix);
5871 case BINDER_WORK_NODE:
5872 node = container_of(w, struct binder_node, work);
5873 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5874 prefix, node->debug_id,
5875 (u64)node->ptr, (u64)node->cookie);
5876 break;
5877 case BINDER_WORK_DEAD_BINDER:
5878 seq_printf(m, "%shas dead binder\n", prefix);
5880 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5881 seq_printf(m, "%shas cleared dead binder\n", prefix);
5883 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5884 seq_printf(m, "%shas cleared death notification\n", prefix);
5887 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5892 static void print_binder_thread_ilocked(struct seq_file *m,
5893 struct binder_thread *thread,
5894 int print_always)
5895 {
5896 struct binder_transaction *t;
5897 struct binder_work *w;
5898 size_t start_pos = m->count;
5899 size_t header_pos;
5901 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5902 thread->pid, thread->looper,
5903 thread->looper_need_return,
5904 atomic_read(&thread->tmp_ref));
5905 header_pos = m->count;
5906 t = thread->transaction_stack;
5907 while (t) {
5908 if (t->from == thread) {
5909 print_binder_transaction_ilocked(m, thread->proc,
5910 " outgoing transaction", t);
5912 } else if (t->to_thread == thread) {
5913 print_binder_transaction_ilocked(m, thread->proc,
5914 " incoming transaction", t);
5917 print_binder_transaction_ilocked(m, thread->proc,
5918 " bad transaction", t);
5922 list_for_each_entry(w, &thread->todo, entry) {
5923 print_binder_work_ilocked(m, thread->proc, " ",
5924 " pending transaction", w);
5926 if (!print_always && m->count == header_pos)
5927 m->count = start_pos;
5928 }
5930 static void print_binder_node_nilocked(struct seq_file *m,
5931 struct binder_node *node)
5932 {
5933 struct binder_ref *ref;
5934 struct binder_work *w;
5935 int count;
5937 count = 0;
5938 hlist_for_each_entry(ref, &node->refs, node_entry)
5939 count++;
5941 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5942 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5943 node->has_strong_ref, node->has_weak_ref,
5944 node->local_strong_refs, node->local_weak_refs,
5945 node->internal_strong_refs, count, node->tmp_refs);
5946 if (count) {
5947 seq_puts(m, " proc");
5948 hlist_for_each_entry(ref, &node->refs, node_entry)
5949 seq_printf(m, " %d", ref->proc->pid);
5953 list_for_each_entry(w, &node->async_todo, entry)
5954 print_binder_work_ilocked(m, node->proc, " ",
5955 " pending async transaction", w);
5959 static void print_binder_ref_olocked(struct seq_file *m,
5960 struct binder_ref *ref)
5961 {
5962 binder_node_lock(ref->node);
5963 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5964 ref->data.debug_id, ref->data.desc,
5965 ref->node->proc ? "" : "dead ",
5966 ref->node->debug_id, ref->data.strong,
5967 ref->data.weak, ref->death);
5968 binder_node_unlock(ref->node);
5969 }
5971 static void print_binder_proc(struct seq_file *m,
5972 struct binder_proc *proc, int print_all)
5973 {
5974 struct binder_work *w;
5975 struct rb_node *n;
5976 size_t start_pos = m->count;
5977 size_t header_pos;
5978 struct binder_node *last_node = NULL;
5980 seq_printf(m, "proc %d\n", proc->pid);
5981 seq_printf(m, "context %s\n", proc->context->name);
5982 header_pos = m->count;
5984 binder_inner_proc_lock(proc);
5985 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5986 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5987 rb_node), print_all);
5989 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5990 struct binder_node *node = rb_entry(n, struct binder_node,
5991 rb_node);
5992 if (!print_all && !node->has_async_transaction)
5993 continue;
5995 /*
5996 * take a temporary reference on the node so it
5997 * survives and isn't removed from the tree
5998 * while we print it.
5999 */
6000 binder_inc_node_tmpref_ilocked(node);
6001 /* Need to drop inner lock to take node lock */
6002 binder_inner_proc_unlock(proc);
6003 if (last_node)
6004 binder_put_node(last_node);
6005 binder_node_inner_lock(node);
6006 print_binder_node_nilocked(m, node);
6007 binder_node_inner_unlock(node);
6008 last_node = node;
6009 binder_inner_proc_lock(proc);
6010 }
6011 binder_inner_proc_unlock(proc);
6012 if (last_node)
6013 binder_put_node(last_node);
6015 if (print_all) {
6016 binder_proc_lock(proc);
6017 for (n = rb_first(&proc->refs_by_desc);
6018 n != NULL;
6019 n = rb_next(n))
6020 print_binder_ref_olocked(m, rb_entry(n,
6021 struct binder_ref,
6022 rb_node_desc));
6023 binder_proc_unlock(proc);
6024 }
6025 binder_alloc_print_allocated(m, &proc->alloc);
6026 binder_inner_proc_lock(proc);
6027 list_for_each_entry(w, &proc->todo, entry)
6028 print_binder_work_ilocked(m, proc, " ",
6029 " pending transaction", w);
6030 list_for_each_entry(w, &proc->delivered_death, entry) {
6031 seq_puts(m, " has delivered dead binder\n");
6034 binder_inner_proc_unlock(proc);
6035 if (!print_all && m->count == header_pos)
6036 m->count = start_pos;
6037 }
6039 static const char * const binder_return_strings[] = {
6040 "BR_ERROR",
6041 "BR_OK",
6042 "BR_TRANSACTION",
6043 "BR_REPLY",
6044 "BR_ACQUIRE_RESULT",
6045 "BR_DEAD_REPLY",
6046 "BR_TRANSACTION_COMPLETE",
6047 "BR_INCREFS",
6048 "BR_ACQUIRE",
6049 "BR_RELEASE",
6050 "BR_DECREFS",
6051 "BR_ATTEMPT_ACQUIRE",
6052 "BR_NOOP",
6053 "BR_SPAWN_LOOPER",
6054 "BR_FINISHED",
6055 "BR_DEAD_BINDER",
6056 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
6057 "BR_FAILED_REPLY",
6058 "BR_FROZEN_REPLY",
6059 "BR_ONEWAY_SPAM_SUSPECT",
6060 };
6062 static const char * const binder_command_strings[] = {
6063 "BC_TRANSACTION",
6064 "BC_REPLY",
6065 "BC_ACQUIRE_RESULT",
6066 "BC_FREE_BUFFER",
6067 "BC_INCREFS",
6068 "BC_ACQUIRE",
6069 "BC_RELEASE",
6070 "BC_DECREFS",
6071 "BC_INCREFS_DONE",
6072 "BC_ACQUIRE_DONE",
6073 "BC_ATTEMPT_ACQUIRE",
6074 "BC_REGISTER_LOOPER",
6075 "BC_ENTER_LOOPER",
6076 "BC_EXIT_LOOPER",
6077 "BC_REQUEST_DEATH_NOTIFICATION",
6078 "BC_CLEAR_DEATH_NOTIFICATION",
6079 "BC_DEAD_BINDER_DONE",
6080 "BC_TRANSACTION_SG",
6081 "BC_REPLY_SG",
6082 };
6084 static const char * const binder_objstat_strings[] = {
6085 "proc",
6086 "thread",
6087 "node",
6088 "ref",
6089 "death",
6090 "transaction",
6091 "transaction_complete"
6092 };
6094 static void print_binder_stats(struct seq_file *m, const char *prefix,
6095 struct binder_stats *stats)
6096 {
6097 int i;
6099 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
6100 ARRAY_SIZE(binder_command_strings));
6101 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
6102 int temp = atomic_read(&stats->bc[i]);
6104 if (temp)
6105 seq_printf(m, "%s%s: %d\n", prefix,
6106 binder_command_strings[i], temp);
6107 }
6109 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
6110 ARRAY_SIZE(binder_return_strings));
6111 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
6112 int temp = atomic_read(&stats->br[i]);
6114 if (temp)
6115 seq_printf(m, "%s%s: %d\n", prefix,
6116 binder_return_strings[i], temp);
6117 }
6119 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6120 ARRAY_SIZE(binder_objstat_strings));
6121 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
6122 ARRAY_SIZE(stats->obj_deleted));
6123 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
6124 int created = atomic_read(&stats->obj_created[i]);
6125 int deleted = atomic_read(&stats->obj_deleted[i]);
6127 if (created || deleted)
6128 seq_printf(m, "%s%s: active %d total %d\n",
6130 binder_objstat_strings[i],
6136 static void print_binder_proc_stats(struct seq_file *m,
6137 struct binder_proc *proc)
6138 {
6139 struct binder_work *w;
6140 struct binder_thread *thread;
6141 struct rb_node *n;
6142 int count, strong, weak, ready_threads;
6143 size_t free_async_space =
6144 binder_alloc_get_free_async_space(&proc->alloc);
6146 seq_printf(m, "proc %d\n", proc->pid);
6147 seq_printf(m, "context %s\n", proc->context->name);
6150 binder_inner_proc_lock(proc);
6151 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
6154 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
6157 seq_printf(m, " threads: %d\n", count);
6158 seq_printf(m, " requested threads: %d+%d/%d\n"
6159 " ready threads %d\n"
6160 " free async space %zd\n", proc->requested_threads,
6161 proc->requested_threads_started, proc->max_threads,
6162 ready_threads,
6163 free_async_space);
6164 count = 0;
6165 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
6166 count++;
6167 binder_inner_proc_unlock(proc);
6168 seq_printf(m, " nodes: %d\n", count);
6172 binder_proc_lock(proc);
6173 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
6174 struct binder_ref *ref = rb_entry(n, struct binder_ref,
6175 rb_node_desc);
6176 count++;
6177 strong += ref->data.strong;
6178 weak += ref->data.weak;
6179 }
6180 binder_proc_unlock(proc);
6181 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
6183 count = binder_alloc_get_allocated_count(&proc->alloc);
6184 seq_printf(m, " buffers: %d\n", count);
6186 binder_alloc_print_pages(m, &proc->alloc);
6188 count = 0;
6189 binder_inner_proc_lock(proc);
6190 list_for_each_entry(w, &proc->todo, entry) {
6191 if (w->type == BINDER_WORK_TRANSACTION)
6192 count++;
6193 }
6194 binder_inner_proc_unlock(proc);
6195 seq_printf(m, " pending transactions: %d\n", count);
6197 print_binder_stats(m, " ", &proc->stats);
6201 int binder_state_show(struct seq_file *m, void *unused)
6202 {
6203 struct binder_proc *proc;
6204 struct binder_node *node;
6205 struct binder_node *last_node = NULL;
6207 seq_puts(m, "binder state:\n");
6209 spin_lock(&binder_dead_nodes_lock);
6210 if (!hlist_empty(&binder_dead_nodes))
6211 seq_puts(m, "dead nodes:\n");
6212 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
6213 /*
6214 * take a temporary reference on the node so it
6215 * survives and isn't removed from the list
6216 * while we print it.
6217 */
6218 node->tmp_refs++;
6219 spin_unlock(&binder_dead_nodes_lock);
6220 if (last_node)
6221 binder_put_node(last_node);
6222 binder_node_lock(node);
6223 print_binder_node_nilocked(m, node);
6224 binder_node_unlock(node);
6225 last_node = node;
6226 spin_lock(&binder_dead_nodes_lock);
6227 }
6228 spin_unlock(&binder_dead_nodes_lock);
6229 if (last_node)
6230 binder_put_node(last_node);
6232 mutex_lock(&binder_procs_lock);
6233 hlist_for_each_entry(proc, &binder_procs, proc_node)
6234 print_binder_proc(m, proc, 1);
6235 mutex_unlock(&binder_procs_lock);
6237 return 0;
6238 }
6240 int binder_stats_show(struct seq_file *m, void *unused)
6241 {
6242 struct binder_proc *proc;
6244 seq_puts(m, "binder stats:\n");
6246 print_binder_stats(m, "", &binder_stats);
6248 mutex_lock(&binder_procs_lock);
6249 hlist_for_each_entry(proc, &binder_procs, proc_node)
6250 print_binder_proc_stats(m, proc);
6251 mutex_unlock(&binder_procs_lock);
6253 return 0;
6254 }
6256 int binder_transactions_show(struct seq_file *m, void *unused)
6257 {
6258 struct binder_proc *proc;
6260 seq_puts(m, "binder transactions:\n");
6261 mutex_lock(&binder_procs_lock);
6262 hlist_for_each_entry(proc, &binder_procs, proc_node)
6263 print_binder_proc(m, proc, 0);
6264 mutex_unlock(&binder_procs_lock);
6266 return 0;
6267 }
6269 static int proc_show(struct seq_file *m, void *unused)
6270 {
6271 struct binder_proc *itr;
6272 int pid = (unsigned long)m->private;
6274 mutex_lock(&binder_procs_lock);
6275 hlist_for_each_entry(itr, &binder_procs, proc_node) {
6276 if (itr->pid == pid) {
6277 seq_puts(m, "binder proc state:\n");
6278 print_binder_proc(m, itr, 1);
6279 }
6280 }
6281 mutex_unlock(&binder_procs_lock);
6283 return 0;
6284 }
6286 static void print_binder_transaction_log_entry(struct seq_file *m,
6287 struct binder_transaction_log_entry *e)
6288 {
6289 int debug_id = READ_ONCE(e->debug_id_done);
6290 /*
6291 * read barrier to guarantee debug_id_done read before
6292 * we print the log values
6293 */
6294 smp_rmb();
6295 seq_printf(m,
6296 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
6297 e->debug_id, (e->call_type == 2) ? "reply" :
6298 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
6299 e->from_thread, e->to_proc, e->to_thread, e->context_name,
6300 e->to_node, e->target_handle, e->data_size, e->offsets_size,
6301 e->return_error, e->return_error_param,
6302 e->return_error_line);
6303 /*
6304 * read-barrier to guarantee read of debug_id_done after
6305 * done printing the fields of the entry
6306 */
6307 smp_rmb();
6308 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
6309 "\n" : " (incomplete)\n");
6312 int binder_transaction_log_show(struct seq_file *m, void *unused)
6313 {
6314 struct binder_transaction_log *log = m->private;
6315 unsigned int log_cur = atomic_read(&log->cur);
6316 unsigned int count;
6317 unsigned int cur;
6318 int i;
6320 count = log_cur + 1;
6321 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
6322 0 : count % ARRAY_SIZE(log->entry);
6323 if (count > ARRAY_SIZE(log->entry) || log->full)
6324 count = ARRAY_SIZE(log->entry);
6325 for (i = 0; i < count; i++) {
6326 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
6328 print_binder_transaction_log_entry(m, &log->entry[index]);
6329 }
6330 return 0;
6331 }
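/*
 * Worked example of the ring arithmetic above, assuming a 32-entry
 * log->entry array (the size is defined in binder_internal.h): with
 * log->full set and log->cur == 40, count starts at 41, cur becomes
 * 41 % 32 = 9, count is clamped to 32, and entries print in the order
 * 9, 10, ..., 31, 0, ..., 8, i.e. oldest first, ending with the most
 * recently written slot (40 % 32 = 8).
 */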
6333 const struct file_operations binder_fops = {
6334 .owner = THIS_MODULE,
6335 .poll = binder_poll,
6336 .unlocked_ioctl = binder_ioctl,
6337 .compat_ioctl = compat_ptr_ioctl,
6338 .mmap = binder_mmap,
6339 .open = binder_open,
6340 .flush = binder_flush,
6341 .release = binder_release,
6342 };
6344 static int __init init_binder_device(const char *name)
6345 {
6346 int ret;
6347 struct binder_device *binder_device;
6349 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6350 if (!binder_device)
6351 return -ENOMEM;
6353 binder_device->miscdev.fops = &binder_fops;
6354 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6355 binder_device->miscdev.name = name;
6357 refcount_set(&binder_device->ref, 1);
6358 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6359 binder_device->context.name = name;
6360 mutex_init(&binder_device->context.context_mgr_node_lock);
6362 ret = misc_register(&binder_device->miscdev);
6363 if (ret < 0) {
6364 kfree(binder_device);
6365 return ret;
6366 }
6368 hlist_add_head(&binder_device->hlist, &binder_devices);
6370 return ret;
6371 }
6373 static int __init binder_init(void)
6374 {
6375 int ret;
6376 char *device_name, *device_tmp;
6377 struct binder_device *device;
6378 struct hlist_node *tmp;
6379 char *device_names = NULL;
6381 ret = binder_alloc_shrinker_init();
6382 if (ret)
6383 return ret;
6385 atomic_set(&binder_transaction_log.cur, ~0U);
6386 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6388 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6389 if (binder_debugfs_dir_entry_root)
6390 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6391 binder_debugfs_dir_entry_root);
6393 if (binder_debugfs_dir_entry_root) {
6394 debugfs_create_file("state",
6396 binder_debugfs_dir_entry_root,
6398 &binder_state_fops);
6399 debugfs_create_file("stats",
6401 binder_debugfs_dir_entry_root,
6403 &binder_stats_fops);
6404 debugfs_create_file("transactions",
6406 binder_debugfs_dir_entry_root,
6408 &binder_transactions_fops);
6409 debugfs_create_file("transaction_log",
6411 binder_debugfs_dir_entry_root,
6412 &binder_transaction_log,
6413 &binder_transaction_log_fops);
6414 debugfs_create_file("failed_transaction_log",
6416 binder_debugfs_dir_entry_root,
6417 &binder_transaction_log_failed,
6418 &binder_transaction_log_fops);
6419 }
6421 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6422 strcmp(binder_devices_param, "") != 0) {
6424 * Copy the module_parameter string, because we don't want to
6425 * tokenize it in-place.
6427 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6428 if (!device_names) {
6429 ret = -ENOMEM;
6430 goto err_alloc_device_names_failed;
6431 }
6433 device_tmp = device_names;
6434 while ((device_name = strsep(&device_tmp, ","))) {
6435 ret = init_binder_device(device_name);
6436 if (ret)
6437 goto err_init_binder_device_failed;
6438 }
6439 }
6441 ret = init_binderfs();
6442 if (ret)
6443 goto err_init_binder_device_failed;
6445 return ret;
6447 err_init_binder_device_failed:
6448 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6449 misc_deregister(&device->miscdev);
6450 hlist_del(&device->hlist);
6451 kfree(device);
6452 }
6454 kfree(device_names);
6456 err_alloc_device_names_failed:
6457 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6459 return ret;
6460 }
6462 device_initcall(binder_init);
6464 #define CREATE_TRACE_POINTS
6465 #include "binder_trace.h"
6467 MODULE_LICENSE("GPL v2");