// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acquire/release it.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acquire/release it.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acquire/release it.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */
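/*
 * Illustrative sketch (not part of the driver): when all three locks are
 * needed for a node whose proc is live, they nest in the order listed
 * above -- outer lock, then node lock, then inner lock:
 *
 *	binder_proc_lock(proc);		// proc->outer_lock
 *	binder_node_lock(node);		// node->lock
 *	binder_inner_proc_lock(proc);	// proc->inner_lock
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_node_unlock(node);
 *	binder_proc_unlock(proc);
 */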
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"
static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);
#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)
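/*
 * The mapping set up by binder's mmap() is read-only for userspace: the
 * driver copies transaction data into it from the kernel side, so any
 * attempt to map the binder fd writable is rejected outright.
 */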
enum {
	BINDER_DEBUG_USER_ERROR             = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
	BINDER_DEBUG_READ_WRITE             = 1U << 6,
	BINDER_DEBUG_USER_REFS              = 1U << 7,
	BINDER_DEBUG_THREADS                = 1U << 8,
	BINDER_DEBUG_TRANSACTION            = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
	BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);
static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 const struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);
	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, 0644);
#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info_ratelimited(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info_ratelimited(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)
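/*
 * Illustrative usage (hypothetical call site): the print below is only
 * emitted when BINDER_DEBUG_THREADS is set in the debug_mask module
 * parameter, e.g. via /sys/module/binder/parameters/debug_mask:
 *
 *	binder_debug(BINDER_DEBUG_THREADS,
 *		     "%d:%d thread exited\n", proc->pid, thread->pid);
 */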
#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)
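/*
 * Illustrative sketch (not from the driver): every object embedded in a
 * transaction buffer starts with a struct binder_object_header, so code
 * can switch on hdr->type and recover the enclosing object:
 *
 *	if (hdr->type == BINDER_TYPE_FD) {
 *		struct binder_fd_object *fdo = to_binder_fd_object(hdr);
 *		... use fdo->fd ...
 *	}
 */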
static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}
struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;
static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}
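/*
 * Assumed reader-side counterpart (sketch, not shown in this section): a
 * consumer reads debug_id_done first, then issues a read barrier pairing
 * with the smp_wmb() above, and treats a zero (or mismatching) value as a
 * sign the entry was still being rewritten:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *	smp_rmb();
 *	... print fields; entry is complete only if done matches e->debug_id ...
 */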
enum binder_deferred_state {
	BINDER_DEFERRED_FLUSH = 0x01,
	BINDER_DEFERRED_RELEASE = 0x02,
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};
/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}
/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:	struct binder_proc being released
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->outer_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}
/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
	__acquires(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}
/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc being released
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
	__releases(&proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}
/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
	__acquires(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}
/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node being released
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
	__releases(&node->lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}
/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields.
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
	__acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		/* annotation for sparse */
		__acquire(&node->proc->inner_lock);
}
/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node being released
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
	__releases(&node->lock) __releases(&node->proc->inner_lock)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	else
		/* annotation for sparse */
		__release(&node->proc->inner_lock);
	spin_unlock(&node->lock);
}
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}
/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:	struct binder_work to add to list
 * @target_list: list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}
/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
					    struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
}
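/*
 * Illustrative contrast (not from the driver): queueing node work with the
 * deferred variant leaves thread->process_todo clear, so an otherwise idle
 * thread stays asleep; the non-deferred variant below additionally sets
 * process_todo, which makes binder_has_work_ilocked() return true:
 *
 *	binder_enqueue_deferred_thread_work_ilocked(thread, &node->work);
 *	... thread->process_todo is left unchanged ...
 */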
/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
				   struct binder_work *work)
{
	WARN_ON(!list_empty(&thread->waiting_thread_node));
	binder_enqueue_work_ilocked(work, &thread->todo);
	thread->process_todo = true;
}
/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:	thread to queue work to
 * @work:	struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
			   struct binder_work *work)
{
	binder_inner_proc_lock(thread->proc);
	binder_enqueue_thread_work_ilocked(thread, work);
	binder_inner_proc_unlock(thread->proc);
}
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}
static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}
static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return thread->process_todo ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);
	return has_work;
}
static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}
static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}
/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}
/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}
static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed use %ld instead\n",
		     current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}
static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}
static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}
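/*
 * Illustrative usage (hypothetical caller): a successful lookup takes a
 * temporary reference that must be dropped with binder_put_node() once
 * the caller is done with the node:
 *
 *	node = binder_get_node(proc, ptr);
 *	if (node) {
 *		... use node ...
 *		binder_put_node(node);
 *	}
 */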
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}
static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}
static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			struct binder_thread *thread = container_of(target_list,
						    struct binder_thread, todo);
			binder_dequeue_work_ilocked(&node->work);
			BUG_ON(&thread->todo != target_list);
			binder_enqueue_deferred_thread_work_ilocked(thread,
								    &node->work);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}
static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}
static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
		    !hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}
static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}
static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}
/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (eg traversing the dead node list to
 * print nodes).
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}
/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}
static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}
/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		     proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		     node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}
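/*
 * Worked example (illustrative) of the descriptor allocation above: with
 * existing descriptors {0, 1, 3} and a node that is not the context
 * manager, new_ref->data.desc starts at 1, is bumped past 0 and 1 to 2,
 * and the loop stops at 3 (3 > 2) -- so the new ref gets desc 2, the
 * smallest unused descriptor >= 1.
 */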
static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		     ref->proc->pid, ref->data.debug_id, ref->data.desc,
		     ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			     ref->proc->pid, ref->data.debug_id,
			     ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}
/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	ref to be incremented
 * @strong:	if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}
/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref.
 *
 * Return: %true if ref is cleaned up and ready to be freed.
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}
/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}
/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}
/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}
/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}
/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	proc containing the ref
 * @node:	target node
 * @strong:	true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
				   struct binder_node *node,
				   bool strong,
				   struct list_head *target_list,
				   struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
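		/*
		 * The outer lock was dropped above because kzalloc() with
		 * GFP_KERNEL may sleep; retake it and redo the lookup below,
		 * since another thread may have created the ref meanwhile.
		 */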
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	if (ret && ref == new_ref) {
		/*
		 * Cleanup the failed reference here as the target
		 * could now be dead and have already released its
		 * references by now. Calling on the new reference
		 * with strong=0 and a tmp_refs will not decrement
		 * the node. The new_ref gets kfree'd below.
		 */
		binder_cleanup_ref_olocked(new_ref);
		ref = NULL;
	}

	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}
/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver).
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}
/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
	    !proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}
/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}
/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}
/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}
static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ? t->to_thread->pid : 0;
	spin_unlock(&t->lock);

	trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
}
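/*
 * Note on the snapshot above: t->from, t->to_proc and t->to_thread can be
 * cleared concurrently, so the pids are captured under t->lock and only
 * the plain integers are handed to the tracepoint after unlocking.
 */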
static void binder_free_transaction(struct binder_transaction *t)
{
	struct binder_proc *target_proc = t->to_proc;

	if (target_proc) {
		binder_inner_proc_lock(target_proc);
		target_proc->outstanding_txns--;
		if (target_proc->outstanding_txns < 0)
			pr_warn("%s: Unexpected outstanding_txns %d\n",
				__func__, target_proc->outstanding_txns);
		if (!target_proc->outstanding_txns && target_proc->is_frozen)
			wake_up_interruptible_all(&target_proc->freeze_wait);
		if (t->buffer)
			t->buffer->transaction = NULL;
		binder_inner_proc_unlock(target_proc);
	}
	if (trace_binder_txn_latency_free_enabled())
		binder_txn_latency_free(t);
	/*
	 * If the transaction has no target_proc, then
	 * t->buffer->transaction has already been cleared.
	 */
	binder_free_txn_fixups(t);
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}
static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				     t->debug_id,
				     target_thread->proc->pid,
				     target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_thread_work_ilocked(
					target_thread,
					&target_thread->reply_error.work);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		__release(&target_thread->proc->inner_lock);
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			     t->debug_id);
	}
}
/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "undelivered transaction %d, %s\n",
			     t->debug_id, reason);
		binder_free_transaction(t);
	}
}
/**
 * binder_get_object() - gets object and checks for valid metadata
 * @proc:	binder_proc owning the buffer
 * @u:		sender's user pointer to base of buffer
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the @buffer at which to validate an object.
 * @object:	struct binder_object to read into
 *
 * Copy the binder object at the given offset into @object. If @u is
 * provided then the copy is from the sender's buffer. If not, then
 * it is copied from the target's @buffer.
 *
 * Return:	If there's a valid metadata object at @offset, the
 *		size of that object. Otherwise, it returns zero. The object
 *		is read into the struct binder_object pointed to by @object.
 */
static size_t binder_get_object(struct binder_proc *proc,
				const void __user *u,
				struct binder_buffer *buffer,
				unsigned long offset,
				struct binder_object *object)
{
	size_t read_size;
	struct binder_object_header *hdr;
	size_t object_size = 0;

	read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
	if (offset > buffer->data_size || read_size < sizeof(*hdr))
		return 0;
	if (u) {
		if (copy_from_user(object, u + offset, read_size))
			return 0;
	} else {
		if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
						  offset, read_size))
			return 0;
	}

	/* Ok, now see if we read a complete object. */
	hdr = &object->hdr;
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	return 0;
}
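/*
 * Worked example (illustrative): with data_size == 40 and a
 * flat_binder_object (24 bytes) claimed at offset 24, the header read
 * succeeds, but 24 > 40 - 24 fails the final bounds check, so
 * binder_get_object() returns 0 and the truncated object is rejected.
 */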
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @proc:	binder_proc owning the buffer
 * @b:		binder_buffer containing the object
 * @object:	struct binder_object to read into
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start_offset: points to the start of the offset array
 * @object_offsetp: offset of @object read from @b
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 *		If @object_offsetp is non-NULL, then the offset within
 *		@b is written to it.
 */
static struct binder_buffer_object *binder_validate_ptr(
						struct binder_proc *proc,
						struct binder_buffer *b,
						struct binder_object *object,
						binder_size_t index,
						binder_size_t start_offset,
						binder_size_t *object_offsetp,
						binder_size_t num_valid)
{
	size_t object_size;
	binder_size_t object_offset;
	unsigned long buffer_offset;

	if (index >= num_valid)
		return NULL;

	buffer_offset = start_offset + sizeof(binder_size_t) * index;
	if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
					  b, buffer_offset,
					  sizeof(object_offset)))
		return NULL;
	object_size = binder_get_object(proc, NULL, b, object_offset, object);
	if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
		return NULL;
	if (object_offsetp)
		*object_offsetp = object_offset;

	return &object->bbo;
}
/**
 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
 * @proc:		binder_proc owning the buffer
 * @b:			transaction buffer
 * @objects_start_offset: offset to start of objects buffer
 * @buffer_obj_offset:	offset to binder_buffer_object in which to fix up
 * @fixup_offset:	start offset in @buffer to fix up
 * @last_obj_offset:	offset to last binder_buffer_object that we fixed
 * @last_min_offset:	minimum fixup offset in object at @last_obj_offset
 *
 * Return:		%true if a fixup in buffer @buffer at offset @offset is
 *			allowed.
 *
 * For safety reasons, we only allow fixups inside a buffer to happen
 * at increasing offsets; additionally, we only allow fixup on the last
 * buffer object that was verified, or one of its parents.
 *
 * Example of what is allowed:
 *
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *     D (parent = C, offset = 0)
 *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
 *
 * Examples of what is not allowed:
 *
 * Decreasing offsets within the same parent:
 * A
 *   C (parent = A, offset = 16)
 *   B (parent = A, offset = 0) // decreasing offset within A
 *
 * Referring to a parent that wasn't the last object or any of its parents:
 * A
 *   B (parent = A, offset = 0)
 *   C (parent = A, offset = 0)
 *   C (parent = A, offset = 16)
 *   D (parent = B, offset = 0) // B is not A or any of A's parents
 */
static bool binder_validate_fixup(struct binder_proc *proc,
				  struct binder_buffer *b,
				  binder_size_t objects_start_offset,
				  binder_size_t buffer_obj_offset,
				  binder_size_t fixup_offset,
				  binder_size_t last_obj_offset,
				  binder_size_t last_min_offset)
{
	if (!last_obj_offset) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj_offset != buffer_obj_offset) {
		unsigned long buffer_offset;
		struct binder_object last_object;
		struct binder_buffer_object *last_bbo;
		size_t object_size = binder_get_object(proc, NULL, b,
						       last_obj_offset,
						       &last_object);
		if (object_size != sizeof(*last_bbo))
			return false;

		last_bbo = &last_object.bbo;
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
		buffer_offset = objects_start_offset +
			sizeof(binder_size_t) * last_bbo->parent;
		if (binder_alloc_copy_from_buffer(&proc->alloc,
						  &last_obj_offset,
						  b, buffer_offset,
						  sizeof(last_obj_offset)))
			return false;
	}
	return (fixup_offset >= last_min_offset);
}
/**
 * struct binder_task_work_cb - for deferred close
 *
 * @twork:	callback_head for task work
 * @file:	file to close
 *
 * Structure to pass task work to be handled after
 * returning from binder_ioctl() via task_work_add().
 */
struct binder_task_work_cb {
	struct callback_head twork;
	struct file *file;
};
/**
 * binder_do_fd_close() - close list of file descriptors
 * @twork:	callback head for task work
 *
 * It is not safe to call ksys_close() during the binder_ioctl()
 * function if there is a chance that binder's own file descriptor
 * might be closed. This is to meet the requirements for using
 * fdget() (see comments for __fget_light()). Therefore use
 * task_work_add() to schedule the close operation once we have
 * returned from binder_ioctl(). This function is a callback
 * for that mechanism and does the actual ksys_close() on the
 * given file descriptor.
 */
static void binder_do_fd_close(struct callback_head *twork)
{
	struct binder_task_work_cb *twcb = container_of(twork,
			struct binder_task_work_cb, twork);

	fput(twcb->file);
	kfree(twcb);
}
/**
 * binder_deferred_fd_close() - schedule a close for the given file-descriptor
 * @fd:		file-descriptor to close
 *
 * See comments in binder_do_fd_close(). This function is used to schedule
 * a file-descriptor to be closed after returning from binder_ioctl().
 */
static void binder_deferred_fd_close(int fd)
{
	struct binder_task_work_cb *twcb;

	twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
	if (!twcb)
		return;
	init_task_work(&twcb->twork, binder_do_fd_close);
	close_fd_get_file(fd, &twcb->file);
	if (twcb->file) {
		filp_close(twcb->file, current->files);
		task_work_add(current, &twcb->twork, TWA_RESUME);
	} else {
		kfree(twcb);
	}
}
static void binder_transaction_buffer_release(struct binder_proc *proc,
					      struct binder_thread *thread,
					      struct binder_buffer *buffer,
					      binder_size_t failed_at,
					      bool is_failure)
{
	int debug_id = buffer->debug_id;
	binder_size_t off_start_offset, buffer_offset, off_end_offset;

	binder_debug(BINDER_DEBUG_TRANSACTION,
		     "%d buffer release %d, size %zd-%zd, failed at %llx\n",
		     proc->pid, buffer->debug_id,
		     buffer->data_size, buffer->offsets_size,
		     (unsigned long long)failed_at);

	if (buffer->target_node)
		binder_dec_node(buffer->target_node, 1, 0);

	off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
	off_end_offset = is_failure && failed_at ? failed_at :
				off_start_offset + buffer->offsets_size;
	for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
	     buffer_offset += sizeof(binder_size_t)) {
		struct binder_object_header *hdr;
		size_t object_size = 0;
		struct binder_object object;
		binder_size_t object_offset;

		if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
						   buffer, buffer_offset,
						   sizeof(object_offset)))
			object_size = binder_get_object(proc, NULL, buffer,
							object_offset, &object);
		if (object_size == 0) {
			pr_err("transaction release %d bad object at offset %lld, size %zd\n",
			       debug_id, (u64)object_offset, buffer->data_size);
			continue;
		}
		hdr = &object.hdr;
		switch (hdr->type) {
		case BINDER_TYPE_BINDER:
		case BINDER_TYPE_WEAK_BINDER: {
			struct flat_binder_object *fp;
			struct binder_node *node;

			fp = to_flat_binder_object(hdr);
			node = binder_get_node(proc, fp->binder);
			if (node == NULL) {
				pr_err("transaction release %d bad node %016llx\n",
				       debug_id, (u64)fp->binder);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        node %d u%016llx\n",
				     node->debug_id, (u64)node->ptr);
			binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
					0);
			binder_put_node(node);
		} break;
		case BINDER_TYPE_HANDLE:
		case BINDER_TYPE_WEAK_HANDLE: {
			struct flat_binder_object *fp;
			struct binder_ref_data rdata;
			int ret;

			fp = to_flat_binder_object(hdr);
			ret = binder_dec_ref_for_handle(proc, fp->handle,
				hdr->type == BINDER_TYPE_HANDLE, &rdata);

			if (ret) {
				pr_err("transaction release %d bad handle %d, ret = %d\n",
				       debug_id, fp->handle, ret);
				break;
			}
			binder_debug(BINDER_DEBUG_TRANSACTION,
				     "        ref %d desc %d\n",
				     rdata.debug_id, rdata.desc);
		} break;

		case BINDER_TYPE_FD: {
			/*
			 * No need to close the file here since user-space
			 * closes it for successfully delivered
			 * transactions. For transactions that weren't
			 * delivered, the new fd was never allocated so
			 * there is no need to close and the fput on the
			 * file is done when the transaction is torn
			 * down.
			 */
		} break;
		case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
		case BINDER_TYPE_FDA: {
			struct binder_fd_array_object *fda;
			struct binder_buffer_object *parent;
			struct binder_object ptr_object;
			binder_size_t fda_offset;
			size_t fd_index;
			binder_size_t fd_buf_size;
			binder_size_t num_valid;

			if (is_failure) {
				/*
				 * The fd fixups have not been applied so no
				 * fds need to be closed.
				 */
				continue;
			}

			num_valid = (buffer_offset - off_start_offset) /
						sizeof(binder_size_t);
			fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(proc, buffer, &ptr_object,
						     fda->parent,
						     off_start_offset,
						     NULL,
						     num_valid);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
			fd_buf_size = sizeof(u32) * fda->num_fds;
			if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
				pr_err("transaction release %d invalid number of fds (%lld)\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			if (fd_buf_size > parent->length ||
			    fda->parent_offset > parent->length - fd_buf_size) {
				/* No space for all file descriptors here. */
				pr_err("transaction release %d not enough space for %lld fds in buffer\n",
				       debug_id, (u64)fda->num_fds);
				continue;
			}
			/*
			 * the source data for binder_buffer_object is visible
			 * to user-space and the @buffer element is the user
			 * pointer to the buffer_object containing the fd_array.
			 * Convert the address to an offset relative to
			 * the base of the transaction buffer.
			 */
			fda_offset =
			    (parent->buffer - (uintptr_t)buffer->user_data) +
			    fda->parent_offset;
			for (fd_index = 0; fd_index < fda->num_fds;
			     fd_index++) {
				u32 fd;
				int err;
				binder_size_t offset = fda_offset +
					fd_index * sizeof(fd);

				err = binder_alloc_copy_from_buffer(
						&proc->alloc, &fd, buffer,
						offset, sizeof(fd));
				WARN_ON(err);
				if (!err) {
					binder_deferred_fd_close(fd);
					/*
					 * Need to make sure the thread goes
					 * back to userspace to complete the
					 * deferred close
					 */
					if (thread)
						thread->looper_need_return = true;
				}
			}
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
2083 static int binder_translate_binder(struct flat_binder_object *fp,
2084 struct binder_transaction *t,
2085 struct binder_thread *thread)
2087 struct binder_node *node;
2088 struct binder_proc *proc = thread->proc;
2089 struct binder_proc *target_proc = t->to_proc;
2090 struct binder_ref_data rdata;
2093 node = binder_get_node(proc, fp->binder);
2095 node = binder_new_node(proc, fp);
2099 if (fp->cookie != node->cookie) {
2100 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2101 proc->pid, thread->pid, (u64)fp->binder,
2102 node->debug_id, (u64)fp->cookie,
2107 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2112 ret = binder_inc_ref_for_node(target_proc, node,
2113 fp->hdr.type == BINDER_TYPE_BINDER,
2114 &thread->todo, &rdata);
2118 if (fp->hdr.type == BINDER_TYPE_BINDER)
2119 fp->hdr.type = BINDER_TYPE_HANDLE;
2121 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2123 fp->handle = rdata.desc;
2126 trace_binder_transaction_node_to_ref(t, node, &rdata);
2127 binder_debug(BINDER_DEBUG_TRANSACTION,
2128 " node %d u%016llx -> ref %d desc %d\n",
2129 node->debug_id, (u64)node->ptr,
2130 rdata.debug_id, rdata.desc);
2132 binder_put_node(node);
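/*
 * Example (user-space sketch; names and values illustrative): the sender
 * embeds a local object as BINDER_TYPE_BINDER and the driver rewrites it
 * in place into a handle for the target process:
 *
 *	struct flat_binder_object obj = {
 *		.hdr.type = BINDER_TYPE_BINDER,
 *		.binder = (binder_uintptr_t)local_ref,	// sender-local pointer
 *		.cookie = (binder_uintptr_t)local_obj,	// must match node->cookie
 *	};
 *
 * After translation the receiver observes hdr.type == BINDER_TYPE_HANDLE
 * and obj.handle == rdata.desc; the raw sender pointer never crosses the
 * process boundary.
 */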
2136 static int binder_translate_handle(struct flat_binder_object *fp,
2137 struct binder_transaction *t,
2138 struct binder_thread *thread)
2140 struct binder_proc *proc = thread->proc;
2141 struct binder_proc *target_proc = t->to_proc;
2142 struct binder_node *node;
2143 struct binder_ref_data src_rdata;
2146 node = binder_get_node_from_ref(proc, fp->handle,
2147 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2149 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2150 proc->pid, thread->pid, fp->handle);
2153 if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2158 binder_node_lock(node);
2159 if (node->proc == target_proc) {
2160 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2161 fp->hdr.type = BINDER_TYPE_BINDER;
2163 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2164 fp->binder = node->ptr;
2165 fp->cookie = node->cookie;
2167 binder_inner_proc_lock(node->proc);
2169 __acquire(&node->proc->inner_lock);
2170 binder_inc_node_nilocked(node,
2171 fp->hdr.type == BINDER_TYPE_BINDER,
2174 binder_inner_proc_unlock(node->proc);
2176 __release(&node->proc->inner_lock);
2177 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2178 binder_debug(BINDER_DEBUG_TRANSACTION,
2179 " ref %d desc %d -> node %d u%016llx\n",
2180 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2182 binder_node_unlock(node);
2184 struct binder_ref_data dest_rdata;
2186 binder_node_unlock(node);
2187 ret = binder_inc_ref_for_node(target_proc, node,
2188 fp->hdr.type == BINDER_TYPE_HANDLE,
2194 fp->handle = dest_rdata.desc;
2196 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2198 binder_debug(BINDER_DEBUG_TRANSACTION,
2199 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2200 src_rdata.debug_id, src_rdata.desc,
2201 dest_rdata.debug_id, dest_rdata.desc,
2205 binder_put_node(node);
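/*
 * The translation above has two outcomes, in sketch form (descriptor
 * values illustrative). If the node behind the sender's handle belongs
 * to the target process, the object collapses back to BINDER_TYPE_BINDER
 * carrying the target's own ptr/cookie; otherwise only the descriptor is
 * rewritten:
 *
 *	sender's view			receiver's view (node not local)
 *	.hdr.type = BINDER_TYPE_HANDLE	.hdr.type = BINDER_TYPE_HANDLE
 *	.handle = 5 (sender's desc)	.handle = dest_rdata.desc
 */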
2209 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2210 struct binder_transaction *t,
2211 struct binder_thread *thread,
2212 struct binder_transaction *in_reply_to)
2214 struct binder_proc *proc = thread->proc;
2215 struct binder_proc *target_proc = t->to_proc;
2216 struct binder_txn_fd_fixup *fixup;
2219 bool target_allows_fd;
2222 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2224 target_allows_fd = t->buffer->target_node->accept_fds;
2225 if (!target_allows_fd) {
2226 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2227 proc->pid, thread->pid,
2228 in_reply_to ? "reply" : "transaction",
2231 goto err_fd_not_accepted;
2236 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2237 proc->pid, thread->pid, fd);
2241 ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2248 * Add fixup record for this transaction. The allocation
2249 * of the fd in the target needs to be done from a
2252 fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2258 fixup->offset = fd_offset;
2259 trace_binder_transaction_fd_send(t, fd, fixup->offset);
2260 list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2268 err_fd_not_accepted:
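/*
 * Sketch of the object this function consumes (field values
 * illustrative). Only a fixup record is queued here; fd allocation is
 * deferred to binder_apply_fd_fixups() because fd tables are
 * per-process and we are still running in the sender's context:
 *
 *	struct binder_fd_object fdo = {
 *		.hdr.type = BINDER_TYPE_FD,
 *		.fd = pipefd[0],	// sender's fd number
 *	};
 *
 * On delivery the driver patches .fd with the number allocated in the
 * receiver's fd table.
 */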
2272 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2273 struct binder_buffer_object *parent,
2274 struct binder_transaction *t,
2275 struct binder_thread *thread,
2276 struct binder_transaction *in_reply_to)
2278 binder_size_t fdi, fd_buf_size;
2279 binder_size_t fda_offset;
2280 struct binder_proc *proc = thread->proc;
2281 struct binder_proc *target_proc = t->to_proc;
2283 fd_buf_size = sizeof(u32) * fda->num_fds;
2284 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2285 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2286 proc->pid, thread->pid, (u64)fda->num_fds);
2289 if (fd_buf_size > parent->length ||
2290 fda->parent_offset > parent->length - fd_buf_size) {
2291 /* No space for all file descriptors here. */
2292 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2293 proc->pid, thread->pid, (u64)fda->num_fds);
2297 * the source data for binder_buffer_object is visible
2298 * to user-space and the @buffer element is the user
2299 * pointer to the buffer_object containing the fd_array.
2300 * Convert the address to an offset relative to
2301 * the base of the transaction buffer.
2303 fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2305 if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2306 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2307 proc->pid, thread->pid);
2310 for (fdi = 0; fdi < fda->num_fds; fdi++) {
2313 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2315 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2317 offset, sizeof(fd));
2319 ret = binder_translate_fd(fd, offset, t, thread,
2322 return ret > 0 ? -EINVAL : ret;
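/*
 * Worked example of the offset math above (values assumed). If
 * parent->buffer, a user pointer into the sg region, sits 0x80 bytes
 * past t->buffer->user_data and fda->parent_offset is 0x10, then for
 * 32-bit fds:
 *
 *	fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data)
 *		   + fda->parent_offset;		// 0x80 + 0x10 = 0x90
 *	offset = fda_offset + fdi * sizeof(u32);	// fd i at 0x90 + 4*i
 *
 * Every fd slot is thus addressed as a buffer offset and copied via
 * binder_alloc_copy_from_buffer(), never dereferenced as a raw user
 * pointer.
 */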
2327 static int binder_fixup_parent(struct binder_transaction *t,
2328 struct binder_thread *thread,
2329 struct binder_buffer_object *bp,
2330 binder_size_t off_start_offset,
2331 binder_size_t num_valid,
2332 binder_size_t last_fixup_obj_off,
2333 binder_size_t last_fixup_min_off)
2335 struct binder_buffer_object *parent;
2336 struct binder_buffer *b = t->buffer;
2337 struct binder_proc *proc = thread->proc;
2338 struct binder_proc *target_proc = t->to_proc;
2339 struct binder_object object;
2340 binder_size_t buffer_offset;
2341 binder_size_t parent_offset;
2343 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2346 parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2347 off_start_offset, &parent_offset,
2350 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2351 proc->pid, thread->pid);
2355 if (!binder_validate_fixup(target_proc, b, off_start_offset,
2356 parent_offset, bp->parent_offset,
2358 last_fixup_min_off)) {
2359 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2360 proc->pid, thread->pid);
2364 if (parent->length < sizeof(binder_uintptr_t) ||
2365 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2366 /* No space for a pointer here! */
2367 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2368 proc->pid, thread->pid);
2371 buffer_offset = bp->parent_offset +
2372 (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2373 if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2374 &bp->buffer, sizeof(bp->buffer))) {
2375 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2376 proc->pid, thread->pid);
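/*
 * Sketch of the fixup performed above (layout assumed). After the sg
 * payload has been copied, the pointer embedded in the parent buffer
 * still holds the sender's address, so the translated address is
 * written over it:
 *
 *	// conceptually: parent_payload[bp->parent_offset] = bp->buffer;
 *
 * implemented with binder_alloc_copy_to_buffer() at the computed
 * buffer_offset, since the kernel never writes through the raw user
 * pointer.
 */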
2384 * binder_proc_transaction() - sends a transaction to a process and wakes it up
2385 * @t: transaction to send
2386 * @proc: process to send the transaction to
2387 * @thread: thread in @proc to send the transaction to (may be NULL)
2389 * This function queues a transaction to the specified process. It will try
2390 * to find a thread in the target process to handle the transaction and
2391 wake it up. If no thread is found, the work is queued to the proc waitqueue.
2394 * If the @thread parameter is not NULL, the transaction is always queued
2395 * to the waitlist of that specific thread.
2397 * Return: 0 if the transaction was successfully queued
2398 * BR_DEAD_REPLY if the target process or thread is dead
2399 * BR_FROZEN_REPLY if the target process or thread is frozen
2401 static int binder_proc_transaction(struct binder_transaction *t,
2402 struct binder_proc *proc,
2403 struct binder_thread *thread)
2405 struct binder_node *node = t->buffer->target_node;
2406 bool oneway = !!(t->flags & TF_ONE_WAY);
2407 bool pending_async = false;
2410 binder_node_lock(node);
2413 if (node->has_async_transaction)
2414 pending_async = true;
2416 node->has_async_transaction = true;
2419 binder_inner_proc_lock(proc);
2420 if (proc->is_frozen) {
2421 proc->sync_recv |= !oneway;
2422 proc->async_recv |= oneway;
2425 if ((proc->is_frozen && !oneway) || proc->is_dead ||
2426 (thread && thread->is_dead)) {
2427 binder_inner_proc_unlock(proc);
2428 binder_node_unlock(node);
2429 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2432 if (!thread && !pending_async)
2433 thread = binder_select_thread_ilocked(proc);
2436 binder_enqueue_thread_work_ilocked(thread, &t->work);
2437 else if (!pending_async)
2438 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2440 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2443 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2445 proc->outstanding_txns++;
2446 binder_inner_proc_unlock(proc);
2447 binder_node_unlock(node);
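/*
 * Queue selection above, summarized (sketch):
 *
 *	if (thread)		 -> thread->todo (caller pinned a thread)
 *	else if (!pending_async) -> a waiting thread's todo, or proc->todo
 *	else			 -> node->async_todo (serialized oneways)
 *
 * Only the !pending_async paths wake anyone; deferred async work is
 * released one transaction at a time from binder_free_buf().
 */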
2453 * binder_get_node_refs_for_txn() - Get required refs on node for txn
2454 * @node: struct binder_node for which to get refs
2455 * @procp: returns @node->proc if valid
2456 * @error: if no @procp then returns BR_DEAD_REPLY
2458 * User-space normally keeps the node alive when creating a transaction
2459 * since it has a reference to the target. The local strong ref keeps it
2460 * alive if the sending process dies before the target process processes
2461 * the transaction. If the source process is malicious or has a reference
2462 * counting bug, relying on the local strong ref can fail.
2464 * Since user-space can cause the local strong ref to go away, we also take
2465 * a tmpref on the node to ensure it survives while we are constructing
2466 * the transaction. We also need a tmpref on the proc while we are
2467 * constructing the transaction, so we take that here as well.
2469 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2470 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2471 * target proc has died, @error is set to BR_DEAD_REPLY
2473 static struct binder_node *binder_get_node_refs_for_txn(
2474 struct binder_node *node,
2475 struct binder_proc **procp,
2478 struct binder_node *target_node = NULL;
2480 binder_node_inner_lock(node);
2483 binder_inc_node_nilocked(node, 1, 0, NULL);
2484 binder_inc_node_tmpref_ilocked(node);
2485 node->proc->tmp_ref++;
2486 *procp = node->proc;
2488 *error = BR_DEAD_REPLY;
2489 binder_node_inner_unlock(node);
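/*
 * Ref discipline for the helper above, in sketch form:
 *
 *	target_node = binder_get_node_refs_for_txn(node, &target_proc, &err);
 *	...
 *	binder_proc_dec_tmpref(target_proc);	// when done with the proc
 *	binder_dec_node_tmpref(target_node);	// when done building
 *	binder_dec_node(target_node, 1, 0);	// unless the strong ref is
 *						// handed to t->buffer->target_node
 *
 * binder_transaction() below drops all three on its error paths, but on
 * success keeps the strong ref with the buffer until it is freed.
 */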
2494 static void binder_transaction(struct binder_proc *proc,
2495 struct binder_thread *thread,
2496 struct binder_transaction_data *tr, int reply,
2497 binder_size_t extra_buffers_size)
2500 struct binder_transaction *t;
2501 struct binder_work *w;
2502 struct binder_work *tcomplete;
2503 binder_size_t buffer_offset = 0;
2504 binder_size_t off_start_offset, off_end_offset;
2505 binder_size_t off_min;
2506 binder_size_t sg_buf_offset, sg_buf_end_offset;
2507 binder_size_t user_offset = 0;
2508 struct binder_proc *target_proc = NULL;
2509 struct binder_thread *target_thread = NULL;
2510 struct binder_node *target_node = NULL;
2511 struct binder_transaction *in_reply_to = NULL;
2512 struct binder_transaction_log_entry *e;
2513 uint32_t return_error = 0;
2514 uint32_t return_error_param = 0;
2515 uint32_t return_error_line = 0;
2516 binder_size_t last_fixup_obj_off = 0;
2517 binder_size_t last_fixup_min_off = 0;
2518 struct binder_context *context = proc->context;
2519 int t_debug_id = atomic_inc_return(&binder_last_id);
2520 char *secctx = NULL;
2522 const void __user *user_buffer = (const void __user *)
2523 (uintptr_t)tr->data.ptr.buffer;
2525 e = binder_transaction_log_add(&binder_transaction_log);
2526 e->debug_id = t_debug_id;
2527 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2528 e->from_proc = proc->pid;
2529 e->from_thread = thread->pid;
2530 e->target_handle = tr->target.handle;
2531 e->data_size = tr->data_size;
2532 e->offsets_size = tr->offsets_size;
2533 strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2536 binder_inner_proc_lock(proc);
2537 in_reply_to = thread->transaction_stack;
2538 if (in_reply_to == NULL) {
2539 binder_inner_proc_unlock(proc);
2540 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2541 proc->pid, thread->pid);
2542 return_error = BR_FAILED_REPLY;
2543 return_error_param = -EPROTO;
2544 return_error_line = __LINE__;
2545 goto err_empty_call_stack;
2547 if (in_reply_to->to_thread != thread) {
2548 spin_lock(&in_reply_to->lock);
2549 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2550 proc->pid, thread->pid, in_reply_to->debug_id,
2551 in_reply_to->to_proc ?
2552 in_reply_to->to_proc->pid : 0,
2553 in_reply_to->to_thread ?
2554 in_reply_to->to_thread->pid : 0);
2555 spin_unlock(&in_reply_to->lock);
2556 binder_inner_proc_unlock(proc);
2557 return_error = BR_FAILED_REPLY;
2558 return_error_param = -EPROTO;
2559 return_error_line = __LINE__;
2561 goto err_bad_call_stack;
2563 thread->transaction_stack = in_reply_to->to_parent;
2564 binder_inner_proc_unlock(proc);
2565 binder_set_nice(in_reply_to->saved_priority);
2566 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2567 if (target_thread == NULL) {
2568 /* annotation for sparse */
2569 __release(&target_thread->proc->inner_lock);
2570 return_error = BR_DEAD_REPLY;
2571 return_error_line = __LINE__;
2572 goto err_dead_binder;
2574 if (target_thread->transaction_stack != in_reply_to) {
2575 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2576 proc->pid, thread->pid,
2577 target_thread->transaction_stack ?
2578 target_thread->transaction_stack->debug_id : 0,
2579 in_reply_to->debug_id);
2580 binder_inner_proc_unlock(target_thread->proc);
2581 return_error = BR_FAILED_REPLY;
2582 return_error_param = -EPROTO;
2583 return_error_line = __LINE__;
2585 target_thread = NULL;
2586 goto err_dead_binder;
2588 target_proc = target_thread->proc;
2589 target_proc->tmp_ref++;
2590 binder_inner_proc_unlock(target_thread->proc);
2592 if (tr->target.handle) {
2593 struct binder_ref *ref;
2596 * There must already be a strong ref
2597 * on this node. If so, do a strong
2598 * increment on the node to ensure it
2599 stays alive until the transaction is complete.
2602 binder_proc_lock(proc);
2603 ref = binder_get_ref_olocked(proc, tr->target.handle,
2606 target_node = binder_get_node_refs_for_txn(
2607 ref->node, &target_proc,
2610 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2611 proc->pid, thread->pid, tr->target.handle);
2612 return_error = BR_FAILED_REPLY;
2614 binder_proc_unlock(proc);
2616 mutex_lock(&context->context_mgr_node_lock);
2617 target_node = context->binder_context_mgr_node;
2619 target_node = binder_get_node_refs_for_txn(
2620 target_node, &target_proc,
2623 return_error = BR_DEAD_REPLY;
2624 mutex_unlock(&context->context_mgr_node_lock);
2625 if (target_node && target_proc->pid == proc->pid) {
2626 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2627 proc->pid, thread->pid);
2628 return_error = BR_FAILED_REPLY;
2629 return_error_param = -EINVAL;
2630 return_error_line = __LINE__;
2631 goto err_invalid_target_handle;
2636 * return_error is set above
2638 return_error_param = -EINVAL;
2639 return_error_line = __LINE__;
2640 goto err_dead_binder;
2642 e->to_node = target_node->debug_id;
2643 if (WARN_ON(proc == target_proc)) {
2644 return_error = BR_FAILED_REPLY;
2645 return_error_param = -EINVAL;
2646 return_error_line = __LINE__;
2647 goto err_invalid_target_handle;
2649 if (security_binder_transaction(proc->cred,
2650 target_proc->cred) < 0) {
2651 return_error = BR_FAILED_REPLY;
2652 return_error_param = -EPERM;
2653 return_error_line = __LINE__;
2654 goto err_invalid_target_handle;
2656 binder_inner_proc_lock(proc);
2658 w = list_first_entry_or_null(&thread->todo,
2659 struct binder_work, entry);
2660 if (!(tr->flags & TF_ONE_WAY) && w &&
2661 w->type == BINDER_WORK_TRANSACTION) {
2663 * Do not allow new outgoing transaction from a
2664 * thread that has a transaction at the head of
2665 * its todo list. Only need to check the head
2666 * because binder_select_thread_ilocked picks a
2667 * thread from proc->waiting_threads to enqueue
2668 * the transaction, and nothing is queued to the
2669 * todo list while the thread is on waiting_threads.
2671 binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2672 proc->pid, thread->pid);
2673 binder_inner_proc_unlock(proc);
2674 return_error = BR_FAILED_REPLY;
2675 return_error_param = -EPROTO;
2676 return_error_line = __LINE__;
2677 goto err_bad_todo_list;
2680 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2681 struct binder_transaction *tmp;
2683 tmp = thread->transaction_stack;
2684 if (tmp->to_thread != thread) {
2685 spin_lock(&tmp->lock);
2686 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2687 proc->pid, thread->pid, tmp->debug_id,
2688 tmp->to_proc ? tmp->to_proc->pid : 0,
2690 tmp->to_thread->pid : 0);
2691 spin_unlock(&tmp->lock);
2692 binder_inner_proc_unlock(proc);
2693 return_error = BR_FAILED_REPLY;
2694 return_error_param = -EPROTO;
2695 return_error_line = __LINE__;
2696 goto err_bad_call_stack;
2699 struct binder_thread *from;
2701 spin_lock(&tmp->lock);
2703 if (from && from->proc == target_proc) {
2704 atomic_inc(&from->tmp_ref);
2705 target_thread = from;
2706 spin_unlock(&tmp->lock);
2709 spin_unlock(&tmp->lock);
2710 tmp = tmp->from_parent;
2713 binder_inner_proc_unlock(proc);
2716 e->to_thread = target_thread->pid;
2717 e->to_proc = target_proc->pid;
2719 /* TODO: reuse incoming transaction for reply */
2720 t = kzalloc(sizeof(*t), GFP_KERNEL);
2722 return_error = BR_FAILED_REPLY;
2723 return_error_param = -ENOMEM;
2724 return_error_line = __LINE__;
2725 goto err_alloc_t_failed;
2727 INIT_LIST_HEAD(&t->fd_fixups);
2728 binder_stats_created(BINDER_STAT_TRANSACTION);
2729 spin_lock_init(&t->lock);
2731 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2732 if (tcomplete == NULL) {
2733 return_error = BR_FAILED_REPLY;
2734 return_error_param = -ENOMEM;
2735 return_error_line = __LINE__;
2736 goto err_alloc_tcomplete_failed;
2738 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2740 t->debug_id = t_debug_id;
2743 binder_debug(BINDER_DEBUG_TRANSACTION,
2744 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2745 proc->pid, thread->pid, t->debug_id,
2746 target_proc->pid, target_thread->pid,
2747 (u64)tr->data.ptr.buffer,
2748 (u64)tr->data.ptr.offsets,
2749 (u64)tr->data_size, (u64)tr->offsets_size,
2750 (u64)extra_buffers_size);
2752 binder_debug(BINDER_DEBUG_TRANSACTION,
2753 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2754 proc->pid, thread->pid, t->debug_id,
2755 target_proc->pid, target_node->debug_id,
2756 (u64)tr->data.ptr.buffer,
2757 (u64)tr->data.ptr.offsets,
2758 (u64)tr->data_size, (u64)tr->offsets_size,
2759 (u64)extra_buffers_size);
2761 if (!reply && !(tr->flags & TF_ONE_WAY))
2765 t->sender_euid = task_euid(proc->tsk);
2766 t->to_proc = target_proc;
2767 t->to_thread = target_thread;
2769 t->flags = tr->flags;
2770 t->priority = task_nice(current);
2772 if (target_node && target_node->txn_security_ctx) {
2776 security_cred_getsecid(proc->cred, &secid);
2777 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2779 return_error = BR_FAILED_REPLY;
2780 return_error_param = ret;
2781 return_error_line = __LINE__;
2782 goto err_get_secctx_failed;
2784 added_size = ALIGN(secctx_sz, sizeof(u64));
2785 extra_buffers_size += added_size;
2786 if (extra_buffers_size < added_size) {
2787 /* integer overflow of extra_buffers_size */
2788 return_error = BR_FAILED_REPLY;
2789 return_error_param = -EINVAL;
2790 return_error_line = __LINE__;
2791 goto err_bad_extra_size;
2795 trace_binder_transaction(reply, t, target_node);
2797 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2798 tr->offsets_size, extra_buffers_size,
2799 !reply && (t->flags & TF_ONE_WAY), current->tgid);
2800 if (IS_ERR(t->buffer)) {
2802 * -ESRCH indicates VMA cleared. The target is dying.
2804 return_error_param = PTR_ERR(t->buffer);
2805 return_error = return_error_param == -ESRCH ?
2806 BR_DEAD_REPLY : BR_FAILED_REPLY;
2807 return_error_line = __LINE__;
2809 goto err_binder_alloc_buf_failed;
2813 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2814 ALIGN(tr->offsets_size, sizeof(void *)) +
2815 ALIGN(extra_buffers_size, sizeof(void *)) -
2816 ALIGN(secctx_sz, sizeof(u64));
2818 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2819 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2820 t->buffer, buf_offset,
2823 t->security_ctx = 0;
2826 security_release_secctx(secctx, secctx_sz);
2829 t->buffer->debug_id = t->debug_id;
2830 t->buffer->transaction = t;
2831 t->buffer->target_node = target_node;
2832 t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2833 trace_binder_transaction_alloc_buf(t->buffer);
2835 if (binder_alloc_copy_user_to_buffer(
2836 &target_proc->alloc,
2838 ALIGN(tr->data_size, sizeof(void *)),
2839 (const void __user *)
2840 (uintptr_t)tr->data.ptr.offsets,
2841 tr->offsets_size)) {
2842 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2843 proc->pid, thread->pid);
2844 return_error = BR_FAILED_REPLY;
2845 return_error_param = -EFAULT;
2846 return_error_line = __LINE__;
2847 goto err_copy_data_failed;
2849 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2850 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2851 proc->pid, thread->pid, (u64)tr->offsets_size);
2852 return_error = BR_FAILED_REPLY;
2853 return_error_param = -EINVAL;
2854 return_error_line = __LINE__;
2855 goto err_bad_offset;
2857 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2858 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2859 proc->pid, thread->pid,
2860 (u64)extra_buffers_size);
2861 return_error = BR_FAILED_REPLY;
2862 return_error_param = -EINVAL;
2863 return_error_line = __LINE__;
2864 goto err_bad_offset;
2866 off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2867 buffer_offset = off_start_offset;
2868 off_end_offset = off_start_offset + tr->offsets_size;
2869 sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2870 sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2871 ALIGN(secctx_sz, sizeof(u64));
2873 for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2874 buffer_offset += sizeof(binder_size_t)) {
2875 struct binder_object_header *hdr;
2877 struct binder_object object;
2878 binder_size_t object_offset;
2879 binder_size_t copy_size;
2881 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2885 sizeof(object_offset))) {
2886 return_error = BR_FAILED_REPLY;
2887 return_error_param = -EINVAL;
2888 return_error_line = __LINE__;
2889 goto err_bad_offset;
2893 * Copy the source user buffer up to the next object
2894 * that will be processed.
2896 copy_size = object_offset - user_offset;
2897 if (copy_size && (user_offset > object_offset ||
2898 binder_alloc_copy_user_to_buffer(
2899 &target_proc->alloc,
2900 t->buffer, user_offset,
2901 user_buffer + user_offset,
2903 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2904 proc->pid, thread->pid);
2905 return_error = BR_FAILED_REPLY;
2906 return_error_param = -EFAULT;
2907 return_error_line = __LINE__;
2908 goto err_copy_data_failed;
2910 object_size = binder_get_object(target_proc, user_buffer,
2911 t->buffer, object_offset, &object);
2912 if (object_size == 0 || object_offset < off_min) {
2913 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2914 proc->pid, thread->pid,
2917 (u64)t->buffer->data_size);
2918 return_error = BR_FAILED_REPLY;
2919 return_error_param = -EINVAL;
2920 return_error_line = __LINE__;
2921 goto err_bad_offset;
2924 * Set offset to the next buffer fragment to be copied.
2927 user_offset = object_offset + object_size;
2930 off_min = object_offset + object_size;
2931 switch (hdr->type) {
2932 case BINDER_TYPE_BINDER:
2933 case BINDER_TYPE_WEAK_BINDER: {
2934 struct flat_binder_object *fp;
2936 fp = to_flat_binder_object(hdr);
2937 ret = binder_translate_binder(fp, t, thread);
2940 binder_alloc_copy_to_buffer(&target_proc->alloc,
2944 return_error = BR_FAILED_REPLY;
2945 return_error_param = ret;
2946 return_error_line = __LINE__;
2947 goto err_translate_failed;
2950 case BINDER_TYPE_HANDLE:
2951 case BINDER_TYPE_WEAK_HANDLE: {
2952 struct flat_binder_object *fp;
2954 fp = to_flat_binder_object(hdr);
2955 ret = binder_translate_handle(fp, t, thread);
2957 binder_alloc_copy_to_buffer(&target_proc->alloc,
2961 return_error = BR_FAILED_REPLY;
2962 return_error_param = ret;
2963 return_error_line = __LINE__;
2964 goto err_translate_failed;
2968 case BINDER_TYPE_FD: {
2969 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2970 binder_size_t fd_offset = object_offset +
2971 (uintptr_t)&fp->fd - (uintptr_t)fp;
2972 int ret = binder_translate_fd(fp->fd, fd_offset, t,
2973 thread, in_reply_to);
2977 binder_alloc_copy_to_buffer(&target_proc->alloc,
2981 return_error = BR_FAILED_REPLY;
2982 return_error_param = ret;
2983 return_error_line = __LINE__;
2984 goto err_translate_failed;
2987 case BINDER_TYPE_FDA: {
2988 struct binder_object ptr_object;
2989 binder_size_t parent_offset;
2990 struct binder_fd_array_object *fda =
2991 to_binder_fd_array_object(hdr);
2992 size_t num_valid = (buffer_offset - off_start_offset) /
2993 sizeof(binder_size_t);
2994 struct binder_buffer_object *parent =
2995 binder_validate_ptr(target_proc, t->buffer,
2996 &ptr_object, fda->parent,
3001 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3002 proc->pid, thread->pid);
3003 return_error = BR_FAILED_REPLY;
3004 return_error_param = -EINVAL;
3005 return_error_line = __LINE__;
3006 goto err_bad_parent;
3008 if (!binder_validate_fixup(target_proc, t->buffer,
3013 last_fixup_min_off)) {
3014 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3015 proc->pid, thread->pid);
3016 return_error = BR_FAILED_REPLY;
3017 return_error_param = -EINVAL;
3018 return_error_line = __LINE__;
3019 goto err_bad_parent;
3021 ret = binder_translate_fd_array(fda, parent, t, thread,
3024 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3029 return_error = BR_FAILED_REPLY;
3030 return_error_param = ret > 0 ? -EINVAL : ret;
3031 return_error_line = __LINE__;
3032 goto err_translate_failed;
3034 last_fixup_obj_off = parent_offset;
3035 last_fixup_min_off =
3036 fda->parent_offset + sizeof(u32) * fda->num_fds;
3038 case BINDER_TYPE_PTR: {
3039 struct binder_buffer_object *bp =
3040 to_binder_buffer_object(hdr);
3041 size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3044 if (bp->length > buf_left) {
3045 binder_user_error("%d:%d got transaction with too large buffer\n",
3046 proc->pid, thread->pid);
3047 return_error = BR_FAILED_REPLY;
3048 return_error_param = -EINVAL;
3049 return_error_line = __LINE__;
3050 goto err_bad_offset;
3052 if (binder_alloc_copy_user_to_buffer(
3053 &target_proc->alloc,
3056 (const void __user *)
3057 (uintptr_t)bp->buffer,
3059 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3060 proc->pid, thread->pid);
3061 return_error_param = -EFAULT;
3062 return_error = BR_FAILED_REPLY;
3063 return_error_line = __LINE__;
3064 goto err_copy_data_failed;
3066 /* Fixup buffer pointer to target proc address space */
3067 bp->buffer = (uintptr_t)
3068 t->buffer->user_data + sg_buf_offset;
3069 sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3071 num_valid = (buffer_offset - off_start_offset) /
3072 sizeof(binder_size_t);
3073 ret = binder_fixup_parent(t, thread, bp,
3077 last_fixup_min_off);
3079 binder_alloc_copy_to_buffer(&target_proc->alloc,
3083 return_error = BR_FAILED_REPLY;
3084 return_error_param = ret;
3085 return_error_line = __LINE__;
3086 goto err_translate_failed;
3088 last_fixup_obj_off = object_offset;
3089 last_fixup_min_off = 0;
3092 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3093 proc->pid, thread->pid, hdr->type);
3094 return_error = BR_FAILED_REPLY;
3095 return_error_param = -EINVAL;
3096 return_error_line = __LINE__;
3097 goto err_bad_object_type;
3100 /* Done processing objects, copy the rest of the buffer */
3101 if (binder_alloc_copy_user_to_buffer(
3102 &target_proc->alloc,
3103 t->buffer, user_offset,
3104 user_buffer + user_offset,
3105 tr->data_size - user_offset)) {
3106 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3107 proc->pid, thread->pid);
3108 return_error = BR_FAILED_REPLY;
3109 return_error_param = -EFAULT;
3110 return_error_line = __LINE__;
3111 goto err_copy_data_failed;
3113 if (t->buffer->oneway_spam_suspect)
3114 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3116 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3117 t->work.type = BINDER_WORK_TRANSACTION;
3120 binder_enqueue_thread_work(thread, tcomplete);
3121 binder_inner_proc_lock(target_proc);
3122 if (target_thread->is_dead) {
3123 return_error = BR_DEAD_REPLY;
3124 binder_inner_proc_unlock(target_proc);
3125 goto err_dead_proc_or_thread;
3127 BUG_ON(t->buffer->async_transaction != 0);
3128 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3129 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3130 target_proc->outstanding_txns++;
3131 binder_inner_proc_unlock(target_proc);
3132 wake_up_interruptible_sync(&target_thread->wait);
3133 binder_free_transaction(in_reply_to);
3134 } else if (!(t->flags & TF_ONE_WAY)) {
3135 BUG_ON(t->buffer->async_transaction != 0);
3136 binder_inner_proc_lock(proc);
3138 * Defer the TRANSACTION_COMPLETE, so we don't return to
3139 * userspace immediately; this allows the target process to
3140 * immediately start processing this transaction, reducing
3141 * latency. We will then return the TRANSACTION_COMPLETE when
3142 * the target replies (or there is an error).
3144 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3146 t->from_parent = thread->transaction_stack;
3147 thread->transaction_stack = t;
3148 binder_inner_proc_unlock(proc);
3149 return_error = binder_proc_transaction(t,
3150 target_proc, target_thread);
3152 binder_inner_proc_lock(proc);
3153 binder_pop_transaction_ilocked(thread, t);
3154 binder_inner_proc_unlock(proc);
3155 goto err_dead_proc_or_thread;
3158 BUG_ON(target_node == NULL);
3159 BUG_ON(t->buffer->async_transaction != 1);
3160 binder_enqueue_thread_work(thread, tcomplete);
3161 return_error = binder_proc_transaction(t, target_proc, NULL);
3163 goto err_dead_proc_or_thread;
3166 binder_thread_dec_tmpref(target_thread);
3167 binder_proc_dec_tmpref(target_proc);
3169 binder_dec_node_tmpref(target_node);
3171 * write barrier to synchronize with initialization of the log entry
3175 WRITE_ONCE(e->debug_id_done, t_debug_id);
3178 err_dead_proc_or_thread:
3179 return_error_line = __LINE__;
3180 binder_dequeue_work(proc, tcomplete);
3181 err_translate_failed:
3182 err_bad_object_type:
3185 err_copy_data_failed:
3186 binder_free_txn_fixups(t);
3187 trace_binder_transaction_failed_buffer_release(t->buffer);
3188 binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3189 buffer_offset, true);
3191 binder_dec_node_tmpref(target_node);
3193 t->buffer->transaction = NULL;
3194 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3195 err_binder_alloc_buf_failed:
3198 security_release_secctx(secctx, secctx_sz);
3199 err_get_secctx_failed:
3201 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3202 err_alloc_tcomplete_failed:
3203 if (trace_binder_txn_latency_free_enabled())
3204 binder_txn_latency_free(t);
3206 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3210 err_empty_call_stack:
3212 err_invalid_target_handle:
3214 binder_thread_dec_tmpref(target_thread);
3216 binder_proc_dec_tmpref(target_proc);
3218 binder_dec_node(target_node, 1, 0);
3219 binder_dec_node_tmpref(target_node);
3222 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3223 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3224 proc->pid, thread->pid, return_error, return_error_param,
3225 (u64)tr->data_size, (u64)tr->offsets_size,
3229 struct binder_transaction_log_entry *fe;
3231 e->return_error = return_error;
3232 e->return_error_param = return_error_param;
3233 e->return_error_line = return_error_line;
3234 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3237 * write barrier to synchronize with initialization of the log entry
3241 WRITE_ONCE(e->debug_id_done, t_debug_id);
3242 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3245 BUG_ON(thread->return_error.cmd != BR_OK);
3247 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3248 binder_enqueue_thread_work(thread, &thread->return_error.work);
3249 binder_send_failed_reply(in_reply_to, return_error);
3251 thread->return_error.cmd = return_error;
3252 binder_enqueue_thread_work(thread, &thread->return_error.work);
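/*
 * Minimal user-space sequence that reaches this function (sketch; wbuf
 * is assumed to hold the command word followed by the payload, error
 * handling omitted):
 *
 *	struct binder_transaction_data tr = {
 *		.target.handle = handle,	// 0 = context manager
 *		.code = 1,			// method code (illustrative)
 *		.data_size = sizeof(payload),
 *		.offsets_size = 0,		// no embedded objects
 *		.data.ptr.buffer = (binder_uintptr_t)payload,
 *	};
 *	// wbuf = [u32 BC_TRANSACTION][tr]
 *	struct binder_write_read bwr = {
 *		.write_size = sizeof(uint32_t) + sizeof(tr),
 *		.write_buffer = (binder_uintptr_t)wbuf,
 *	};
 *	ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
 */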
3257 * binder_free_buf() - free the specified buffer
3258 * @proc: binder proc that owns buffer
3259 * @buffer: buffer to be freed
3260 * @is_failure: failed to send transaction
3262 * If the buffer is for an async transaction, enqueue the next async
3263 * transaction from the node.
3265 * Cleanup buffer and free it.
3268 binder_free_buf(struct binder_proc *proc,
3269 struct binder_thread *thread,
3270 struct binder_buffer *buffer, bool is_failure)
3272 binder_inner_proc_lock(proc);
3273 if (buffer->transaction) {
3274 buffer->transaction->buffer = NULL;
3275 buffer->transaction = NULL;
3277 binder_inner_proc_unlock(proc);
3278 if (buffer->async_transaction && buffer->target_node) {
3279 struct binder_node *buf_node;
3280 struct binder_work *w;
3282 buf_node = buffer->target_node;
3283 binder_node_inner_lock(buf_node);
3284 BUG_ON(!buf_node->has_async_transaction);
3285 BUG_ON(buf_node->proc != proc);
3286 w = binder_dequeue_work_head_ilocked(
3287 &buf_node->async_todo);
3289 buf_node->has_async_transaction = false;
3291 binder_enqueue_work_ilocked(
3293 binder_wakeup_proc_ilocked(proc);
3295 binder_node_inner_unlock(buf_node);
3297 trace_binder_transaction_buffer_release(buffer);
3298 binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3299 binder_alloc_free_buf(&proc->alloc, buffer);
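/*
 * The receiver triggers this path once it has consumed a delivered
 * transaction (user-space sketch; names assumed):
 *
 *	uint32_t cmd = BC_FREE_BUFFER;
 *	binder_uintptr_t addr = trd->data.ptr.buffer;	// from BR_TRANSACTION
 *	// written as [cmd][addr] through BINDER_WRITE_READ; the driver maps
 *	// addr back to a struct binder_buffer via
 *	// binder_alloc_prepare_to_free() before calling binder_free_buf().
 */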
3302 static int binder_thread_write(struct binder_proc *proc,
3303 struct binder_thread *thread,
3304 binder_uintptr_t binder_buffer, size_t size,
3305 binder_size_t *consumed)
3308 struct binder_context *context = proc->context;
3309 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3310 void __user *ptr = buffer + *consumed;
3311 void __user *end = buffer + size;
3313 while (ptr < end && thread->return_error.cmd == BR_OK) {
3316 if (get_user(cmd, (uint32_t __user *)ptr))
3318 ptr += sizeof(uint32_t);
3319 trace_binder_command(cmd);
3320 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3321 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3322 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3323 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3331 const char *debug_string;
3332 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3333 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3334 struct binder_ref_data rdata;
3336 if (get_user(target, (uint32_t __user *)ptr))
3339 ptr += sizeof(uint32_t);
3341 if (increment && !target) {
3342 struct binder_node *ctx_mgr_node;
3344 mutex_lock(&context->context_mgr_node_lock);
3345 ctx_mgr_node = context->binder_context_mgr_node;
3347 if (ctx_mgr_node->proc == proc) {
3348 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3349 proc->pid, thread->pid);
3350 mutex_unlock(&context->context_mgr_node_lock);
3353 ret = binder_inc_ref_for_node(
3355 strong, NULL, &rdata);
3357 mutex_unlock(&context->context_mgr_node_lock);
3360 ret = binder_update_ref_for_handle(
3361 proc, target, increment, strong,
3363 if (!ret && rdata.desc != target) {
3364 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3365 proc->pid, thread->pid,
3366 target, rdata.desc);
3370 debug_string = "IncRefs";
3373 debug_string = "Acquire";
3376 debug_string = "Release";
3380 debug_string = "DecRefs";
3384 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3385 proc->pid, thread->pid, debug_string,
3386 strong, target, ret);
3389 binder_debug(BINDER_DEBUG_USER_REFS,
3390 "%d:%d %s ref %d desc %d s %d w %d\n",
3391 proc->pid, thread->pid, debug_string,
3392 rdata.debug_id, rdata.desc, rdata.strong,
3396 case BC_INCREFS_DONE:
3397 case BC_ACQUIRE_DONE: {
3398 binder_uintptr_t node_ptr;
3399 binder_uintptr_t cookie;
3400 struct binder_node *node;
3403 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3405 ptr += sizeof(binder_uintptr_t);
3406 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3408 ptr += sizeof(binder_uintptr_t);
3409 node = binder_get_node(proc, node_ptr);
3411 binder_user_error("%d:%d %s u%016llx no match\n",
3412 proc->pid, thread->pid,
3413 cmd == BC_INCREFS_DONE ?
3419 if (cookie != node->cookie) {
3420 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3421 proc->pid, thread->pid,
3422 cmd == BC_INCREFS_DONE ?
3423 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3424 (u64)node_ptr, node->debug_id,
3425 (u64)cookie, (u64)node->cookie);
3426 binder_put_node(node);
3429 binder_node_inner_lock(node);
3430 if (cmd == BC_ACQUIRE_DONE) {
3431 if (node->pending_strong_ref == 0) {
3432 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3433 proc->pid, thread->pid,
3435 binder_node_inner_unlock(node);
3436 binder_put_node(node);
3439 node->pending_strong_ref = 0;
3441 if (node->pending_weak_ref == 0) {
3442 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3443 proc->pid, thread->pid,
3445 binder_node_inner_unlock(node);
3446 binder_put_node(node);
3449 node->pending_weak_ref = 0;
3451 free_node = binder_dec_node_nilocked(node,
3452 cmd == BC_ACQUIRE_DONE, 0);
3454 binder_debug(BINDER_DEBUG_USER_REFS,
3455 "%d:%d %s node %d ls %d lw %d tr %d\n",
3456 proc->pid, thread->pid,
3457 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3458 node->debug_id, node->local_strong_refs,
3459 node->local_weak_refs, node->tmp_refs);
3460 binder_node_inner_unlock(node);
3461 binder_put_node(node);
3464 case BC_ATTEMPT_ACQUIRE:
3465 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3467 case BC_ACQUIRE_RESULT:
3468 pr_err("BC_ACQUIRE_RESULT not supported\n");
3471 case BC_FREE_BUFFER: {
3472 binder_uintptr_t data_ptr;
3473 struct binder_buffer *buffer;
3475 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3477 ptr += sizeof(binder_uintptr_t);
3479 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3481 if (IS_ERR_OR_NULL(buffer)) {
3482 if (PTR_ERR(buffer) == -EPERM) {
3484 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3485 proc->pid, thread->pid,
3489 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3490 proc->pid, thread->pid,
3495 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3496 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3497 proc->pid, thread->pid, (u64)data_ptr,
3499 buffer->transaction ? "active" : "finished");
3500 binder_free_buf(proc, thread, buffer, false);
3504 case BC_TRANSACTION_SG:
3506 struct binder_transaction_data_sg tr;
3508 if (copy_from_user(&tr, ptr, sizeof(tr)))
3511 binder_transaction(proc, thread, &tr.transaction_data,
3512 cmd == BC_REPLY_SG, tr.buffers_size);
3515 case BC_TRANSACTION:
3517 struct binder_transaction_data tr;
3519 if (copy_from_user(&tr, ptr, sizeof(tr)))
3522 binder_transaction(proc, thread, &tr,
3523 cmd == BC_REPLY, 0);
3527 case BC_REGISTER_LOOPER:
3528 binder_debug(BINDER_DEBUG_THREADS,
3529 "%d:%d BC_REGISTER_LOOPER\n",
3530 proc->pid, thread->pid);
3531 binder_inner_proc_lock(proc);
3532 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3533 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3534 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3535 proc->pid, thread->pid);
3536 } else if (proc->requested_threads == 0) {
3537 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3538 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3539 proc->pid, thread->pid);
3541 proc->requested_threads--;
3542 proc->requested_threads_started++;
3544 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3545 binder_inner_proc_unlock(proc);
3547 case BC_ENTER_LOOPER:
3548 binder_debug(BINDER_DEBUG_THREADS,
3549 "%d:%d BC_ENTER_LOOPER\n",
3550 proc->pid, thread->pid);
3551 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3552 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3553 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3554 proc->pid, thread->pid);
3556 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3558 case BC_EXIT_LOOPER:
3559 binder_debug(BINDER_DEBUG_THREADS,
3560 "%d:%d BC_EXIT_LOOPER\n",
3561 proc->pid, thread->pid);
3562 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3565 case BC_REQUEST_DEATH_NOTIFICATION:
3566 case BC_CLEAR_DEATH_NOTIFICATION: {
3568 binder_uintptr_t cookie;
3569 struct binder_ref *ref;
3570 struct binder_ref_death *death = NULL;
3572 if (get_user(target, (uint32_t __user *)ptr))
3574 ptr += sizeof(uint32_t);
3575 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3577 ptr += sizeof(binder_uintptr_t);
3578 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3580 * Allocate memory for death notification
3581 * before taking lock
3583 death = kzalloc(sizeof(*death), GFP_KERNEL);
3584 if (death == NULL) {
3585 WARN_ON(thread->return_error.cmd !=
3587 thread->return_error.cmd = BR_ERROR;
3588 binder_enqueue_thread_work(
3590 &thread->return_error.work);
3592 BINDER_DEBUG_FAILED_TRANSACTION,
3593 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3594 proc->pid, thread->pid);
3598 binder_proc_lock(proc);
3599 ref = binder_get_ref_olocked(proc, target, false);
3601 binder_user_error("%d:%d %s invalid ref %d\n",
3602 proc->pid, thread->pid,
3603 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3604 "BC_REQUEST_DEATH_NOTIFICATION" :
3605 "BC_CLEAR_DEATH_NOTIFICATION",
3607 binder_proc_unlock(proc);
3612 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3613 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3614 proc->pid, thread->pid,
3615 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3616 "BC_REQUEST_DEATH_NOTIFICATION" :
3617 "BC_CLEAR_DEATH_NOTIFICATION",
3618 (u64)cookie, ref->data.debug_id,
3619 ref->data.desc, ref->data.strong,
3620 ref->data.weak, ref->node->debug_id);
3622 binder_node_lock(ref->node);
3623 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3625 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3626 proc->pid, thread->pid);
3627 binder_node_unlock(ref->node);
3628 binder_proc_unlock(proc);
3632 binder_stats_created(BINDER_STAT_DEATH);
3633 INIT_LIST_HEAD(&death->work.entry);
3634 death->cookie = cookie;
3636 if (ref->node->proc == NULL) {
3637 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3639 binder_inner_proc_lock(proc);
3640 binder_enqueue_work_ilocked(
3641 &ref->death->work, &proc->todo);
3642 binder_wakeup_proc_ilocked(proc);
3643 binder_inner_proc_unlock(proc);
3646 if (ref->death == NULL) {
3647 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3648 proc->pid, thread->pid);
3649 binder_node_unlock(ref->node);
3650 binder_proc_unlock(proc);
3654 if (death->cookie != cookie) {
3655 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3656 proc->pid, thread->pid,
3659 binder_node_unlock(ref->node);
3660 binder_proc_unlock(proc);
3664 binder_inner_proc_lock(proc);
3665 if (list_empty(&death->work.entry)) {
3666 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3667 if (thread->looper &
3668 (BINDER_LOOPER_STATE_REGISTERED |
3669 BINDER_LOOPER_STATE_ENTERED))
3670 binder_enqueue_thread_work_ilocked(
3674 binder_enqueue_work_ilocked(
3677 binder_wakeup_proc_ilocked(
3681 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3682 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3684 binder_inner_proc_unlock(proc);
3686 binder_node_unlock(ref->node);
3687 binder_proc_unlock(proc);
3689 case BC_DEAD_BINDER_DONE: {
3690 struct binder_work *w;
3691 binder_uintptr_t cookie;
3692 struct binder_ref_death *death = NULL;
3694 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3697 ptr += sizeof(cookie);
3698 binder_inner_proc_lock(proc);
3699 list_for_each_entry(w, &proc->delivered_death,
3701 struct binder_ref_death *tmp_death =
3703 struct binder_ref_death,
3706 if (tmp_death->cookie == cookie) {
3711 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3712 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3713 proc->pid, thread->pid, (u64)cookie,
3715 if (death == NULL) {
3716 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3717 proc->pid, thread->pid, (u64)cookie);
3718 binder_inner_proc_unlock(proc);
3721 binder_dequeue_work_ilocked(&death->work);
3722 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3723 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3724 if (thread->looper &
3725 (BINDER_LOOPER_STATE_REGISTERED |
3726 BINDER_LOOPER_STATE_ENTERED))
3727 binder_enqueue_thread_work_ilocked(
3728 thread, &death->work);
3730 binder_enqueue_work_ilocked(
3733 binder_wakeup_proc_ilocked(proc);
3736 binder_inner_proc_unlock(proc);
3740 pr_err("%d:%d unknown command %d\n",
3741 proc->pid, thread->pid, cmd);
3744 *consumed = ptr - buffer;
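/*
 * Shape of the write stream parsed above (sketch): a flat sequence of
 * records, each a u32 BC_* code immediately followed by that command's
 * fixed-size payload:
 *
 *	[BC_ENTER_LOOPER]				// 4 bytes, no payload
 *	[BC_TRANSACTION][struct binder_transaction_data]
 *	[BC_FREE_BUFFER][binder_uintptr_t]
 *
 * *consumed advances per record, so user space can resubmit a partially
 * processed buffer after an error return.
 */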
3749 static void binder_stat_br(struct binder_proc *proc,
3750 struct binder_thread *thread, uint32_t cmd)
3752 trace_binder_return(cmd);
3753 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3754 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3755 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3756 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3760 static int binder_put_node_cmd(struct binder_proc *proc,
3761 struct binder_thread *thread,
3763 binder_uintptr_t node_ptr,
3764 binder_uintptr_t node_cookie,
3766 uint32_t cmd, const char *cmd_name)
3768 void __user *ptr = *ptrp;
3770 if (put_user(cmd, (uint32_t __user *)ptr))
3772 ptr += sizeof(uint32_t);
3774 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3776 ptr += sizeof(binder_uintptr_t);
3778 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3780 ptr += sizeof(binder_uintptr_t);
3782 binder_stat_br(proc, thread, cmd);
3783 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3784 proc->pid, thread->pid, cmd_name, node_debug_id,
3785 (u64)node_ptr, (u64)node_cookie);
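/*
 * The commands emitted through binder_put_node_cmd() pair up with the
 * BC_ acks handled in binder_thread_write() above (sketch):
 *
 *	kernel -> user: BR_INCREFS / BR_ACQUIRE   (take weak/strong ref)
 *	user -> kernel: BC_INCREFS_DONE / BC_ACQUIRE_DONE
 *	kernel -> user: BR_DECREFS / BR_RELEASE   (drop weak/strong ref, no ack)
 *
 * The _DONE replies clear node->pending_weak_ref/pending_strong_ref.
 */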
3791 static int binder_wait_for_work(struct binder_thread *thread,
3795 struct binder_proc *proc = thread->proc;
3798 freezer_do_not_count();
3799 binder_inner_proc_lock(proc);
3801 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3802 if (binder_has_work_ilocked(thread, do_proc_work))
3805 list_add(&thread->waiting_thread_node,
3806 &proc->waiting_threads);
3807 binder_inner_proc_unlock(proc);
3809 binder_inner_proc_lock(proc);
3810 list_del_init(&thread->waiting_thread_node);
3811 if (signal_pending(current)) {
3816 finish_wait(&thread->wait, &wait);
3817 binder_inner_proc_unlock(proc);
3824 * binder_apply_fd_fixups() - finish fd translation
3825 * @proc: binder_proc associated with @t->buffer
3826 * @t: binder transaction with list of fd fixups
3828 * Now that we are in the context of the transaction target
3829 * process, we can allocate and install fds. Process the
3830 * list of fds to translate and fixup the buffer with the new fds.
3833 * If we fail to allocate an fd, then free the resources by
3834 * fput'ing files that have not been processed and ksys_close'ing
3835 * any fds that have already been allocated.
3837 static int binder_apply_fd_fixups(struct binder_proc *proc,
3838 struct binder_transaction *t)
3840 struct binder_txn_fd_fixup *fixup, *tmp;
3843 list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3844 int fd = get_unused_fd_flags(O_CLOEXEC);
3847 binder_debug(BINDER_DEBUG_TRANSACTION,
3848 "failed fd fixup txn %d fd %d\n",
3853 binder_debug(BINDER_DEBUG_TRANSACTION,
3854 "fd fixup txn %d fd %d\n",
3856 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3857 fd_install(fd, fixup->file);
3859 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3866 list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3873 err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3879 binder_deferred_fd_close(fd);
3881 list_del(&fixup->fixup_entry);
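/*
 * End-to-end fd path, in sketch form: binder_translate_fd() queued a
 * fixup in the sender's context; the loop above, running in the
 * receiver's context, performs
 *
 *	fd = get_unused_fd_flags(O_CLOEXEC);	// receiver-side number
 *	fd_install(fd, fixup->file);		// file reference handed over
 *	// copy fd into t->buffer at fixup->offset
 *
 * so the receiver reads a valid fd of its own where the sender wrote
 * one of its fds. On failure, fds already installed are closed through
 * binder_deferred_fd_close() and unprocessed files are fput().
 */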
3888 static int binder_thread_read(struct binder_proc *proc,
3889 struct binder_thread *thread,
3890 binder_uintptr_t binder_buffer, size_t size,
3891 binder_size_t *consumed, int non_block)
3893 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3894 void __user *ptr = buffer + *consumed;
3895 void __user *end = buffer + size;
3898 int wait_for_proc_work;
3900 if (*consumed == 0) {
3901 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3903 ptr += sizeof(uint32_t);
3907 binder_inner_proc_lock(proc);
3908 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3909 binder_inner_proc_unlock(proc);
3911 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3913 trace_binder_wait_for_work(wait_for_proc_work,
3914 !!thread->transaction_stack,
3915 !binder_worklist_empty(proc, &thread->todo));
3916 if (wait_for_proc_work) {
3917 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3918 BINDER_LOOPER_STATE_ENTERED))) {
3919 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3920 proc->pid, thread->pid, thread->looper);
3921 wait_event_interruptible(binder_user_error_wait,
3922 binder_stop_on_user_error < 2);
3924 binder_set_nice(proc->default_priority);
3928 if (!binder_has_work(thread, wait_for_proc_work))
3931 ret = binder_wait_for_work(thread, wait_for_proc_work);
3934 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3941 struct binder_transaction_data_secctx tr;
3942 struct binder_transaction_data *trd = &tr.transaction_data;
3943 struct binder_work *w = NULL;
3944 struct list_head *list = NULL;
3945 struct binder_transaction *t = NULL;
3946 struct binder_thread *t_from;
3947 size_t trsize = sizeof(*trd);
3949 binder_inner_proc_lock(proc);
3950 if (!binder_worklist_empty_ilocked(&thread->todo))
3951 list = &thread->todo;
3952 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3956 binder_inner_proc_unlock(proc);
3959 if (ptr - buffer == 4 && !thread->looper_need_return)
3964 if (end - ptr < sizeof(tr) + 4) {
3965 binder_inner_proc_unlock(proc);
3968 w = binder_dequeue_work_head_ilocked(list);
3969 if (binder_worklist_empty_ilocked(&thread->todo))
3970 thread->process_todo = false;
3973 case BINDER_WORK_TRANSACTION: {
3974 binder_inner_proc_unlock(proc);
3975 t = container_of(w, struct binder_transaction, work);
3977 case BINDER_WORK_RETURN_ERROR: {
3978 struct binder_error *e = container_of(
3979 w, struct binder_error, work);
3981 WARN_ON(e->cmd == BR_OK);
3982 binder_inner_proc_unlock(proc);
3983 if (put_user(e->cmd, (uint32_t __user *)ptr))
3987 ptr += sizeof(uint32_t);
3989 binder_stat_br(proc, thread, cmd);
3991 case BINDER_WORK_TRANSACTION_COMPLETE:
3992 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3993 if (proc->oneway_spam_detection_enabled &&
3994 w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3995 cmd = BR_ONEWAY_SPAM_SUSPECT;
3997 cmd = BR_TRANSACTION_COMPLETE;
3998 binder_inner_proc_unlock(proc);
4000 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4001 if (put_user(cmd, (uint32_t __user *)ptr))
4003 ptr += sizeof(uint32_t);
4005 binder_stat_br(proc, thread, cmd);
4006 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4007 "%d:%d BR_TRANSACTION_COMPLETE\n",
4008 proc->pid, thread->pid);
4010 case BINDER_WORK_NODE: {
4011 struct binder_node *node = container_of(w, struct binder_node, work);
4013 binder_uintptr_t node_ptr = node->ptr;
4014 binder_uintptr_t node_cookie = node->cookie;
4015 int node_debug_id = node->debug_id;
4018 void __user *orig_ptr = ptr;
4020 BUG_ON(proc != node->proc);
4021 strong = node->internal_strong_refs ||
4022 node->local_strong_refs;
4023 weak = !hlist_empty(&node->refs) ||
4024 node->local_weak_refs ||
4025 node->tmp_refs || strong;
4026 has_strong_ref = node->has_strong_ref;
4027 has_weak_ref = node->has_weak_ref;
4029 if (weak && !has_weak_ref) {
4030 node->has_weak_ref = 1;
4031 node->pending_weak_ref = 1;
4032 node->local_weak_refs++;
4034 if (strong && !has_strong_ref) {
4035 node->has_strong_ref = 1;
4036 node->pending_strong_ref = 1;
4037 node->local_strong_refs++;
4039 if (!strong && has_strong_ref)
4040 node->has_strong_ref = 0;
4041 if (!weak && has_weak_ref)
4042 node->has_weak_ref = 0;
4043 if (!weak && !strong) {
4044 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4045 "%d:%d node %d u%016llx c%016llx deleted\n",
4046 proc->pid, thread->pid,
4050 rb_erase(&node->rb_node, &proc->nodes);
4051 binder_inner_proc_unlock(proc);
4052 binder_node_lock(node);
4054 * Acquire the node lock before freeing the
4055 * node to serialize with other threads that
4056 * may have been holding the node lock while
4057 * decrementing this node (avoids race where
4058 * this thread frees while the other thread
4059 * is unlocking the node after the final decrement)
4062 binder_node_unlock(node);
4063 binder_free_node(node);
4065 binder_inner_proc_unlock(proc);
4067 if (weak && !has_weak_ref)
4068 ret = binder_put_node_cmd(
4069 proc, thread, &ptr, node_ptr,
4070 node_cookie, node_debug_id,
4071 BR_INCREFS, "BR_INCREFS");
4072 if (!ret && strong && !has_strong_ref)
4073 ret = binder_put_node_cmd(
4074 proc, thread, &ptr, node_ptr,
4075 node_cookie, node_debug_id,
4076 BR_ACQUIRE, "BR_ACQUIRE");
4077 if (!ret && !strong && has_strong_ref)
4078 ret = binder_put_node_cmd(
4079 proc, thread, &ptr, node_ptr,
4080 node_cookie, node_debug_id,
4081 BR_RELEASE, "BR_RELEASE");
4082 if (!ret && !weak && has_weak_ref)
4083 ret = binder_put_node_cmd(
4084 proc, thread, &ptr, node_ptr,
4085 node_cookie, node_debug_id,
4086 BR_DECREFS, "BR_DECREFS");
4087 if (orig_ptr == ptr)
4088 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4089 "%d:%d node %d u%016llx c%016llx state unchanged\n",
4090 proc->pid, thread->pid,
4097 case BINDER_WORK_DEAD_BINDER:
4098 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4099 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4100 struct binder_ref_death *death;
4102 binder_uintptr_t cookie;
4104 death = container_of(w, struct binder_ref_death, work);
4105 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4106 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4108 cmd = BR_DEAD_BINDER;
4109 cookie = death->cookie;
4111 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4112 "%d:%d %s %016llx\n",
4113 proc->pid, thread->pid,
4114 cmd == BR_DEAD_BINDER ?
4116 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4118 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4119 binder_inner_proc_unlock(proc);
4121 binder_stats_deleted(BINDER_STAT_DEATH);
4123 binder_enqueue_work_ilocked(
4124 w, &proc->delivered_death);
4125 binder_inner_proc_unlock(proc);
4127 if (put_user(cmd, (uint32_t __user *)ptr))
4129 ptr += sizeof(uint32_t);
4130 if (put_user(cookie,
4131 (binder_uintptr_t __user *)ptr))
4133 ptr += sizeof(binder_uintptr_t);
4134 binder_stat_br(proc, thread, cmd);
4135 if (cmd == BR_DEAD_BINDER)
4136 goto done; /* DEAD_BINDER notifications can cause transactions */
4139 binder_inner_proc_unlock(proc);
4140 pr_err("%d:%d: bad work type %d\n",
4141 proc->pid, thread->pid, w->type);
4148 BUG_ON(t->buffer == NULL);
4149 if (t->buffer->target_node) {
4150 struct binder_node *target_node = t->buffer->target_node;
4152 trd->target.ptr = target_node->ptr;
4153 trd->cookie = target_node->cookie;
4154 t->saved_priority = task_nice(current);
4155 if (t->priority < target_node->min_priority &&
4156 !(t->flags & TF_ONE_WAY))
4157 binder_set_nice(t->priority);
4158 else if (!(t->flags & TF_ONE_WAY) ||
4159 t->saved_priority > target_node->min_priority)
4160 binder_set_nice(target_node->min_priority);
4161 cmd = BR_TRANSACTION;
4163 trd->target.ptr = 0;
4167 trd->code = t->code;
4168 trd->flags = t->flags;
4169 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4171 t_from = binder_get_txn_from(t);
4173 struct task_struct *sender = t_from->proc->tsk;
4176 task_tgid_nr_ns(sender,
4177 task_active_pid_ns(current));
4179 trd->sender_pid = 0;
4182 ret = binder_apply_fd_fixups(proc, t);
4183 if (ret) {
4184 struct binder_buffer *buffer = t->buffer;
4185 bool oneway = !!(t->flags & TF_ONE_WAY);
4186 int tid = t->debug_id;
4188 if (t_from)
4189 binder_thread_dec_tmpref(t_from);
4190 buffer->transaction = NULL;
4191 binder_cleanup_transaction(t, "fd fixups failed",
4192 BR_FAILED_REPLY);
4193 binder_free_buf(proc, thread, buffer, true);
4194 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4195 "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4196 proc->pid, thread->pid,
4197 oneway ? "async " :
4198 (cmd == BR_REPLY ? "reply " : ""),
4199 tid, BR_FAILED_REPLY, ret, __LINE__);
4200 if (cmd == BR_REPLY) {
4201 cmd = BR_FAILED_REPLY;
4202 if (put_user(cmd, (uint32_t __user *)ptr))
4203 return -EFAULT;
4204 ptr += sizeof(uint32_t);
4205 binder_stat_br(proc, thread, cmd);
4206 break;
4207 }
4208 continue;
4209 }
4210 trd->data_size = t->buffer->data_size;
4211 trd->offsets_size = t->buffer->offsets_size;
4212 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4213 trd->data.ptr.offsets = trd->data.ptr.buffer +
4214 ALIGN(t->buffer->data_size,
4215 sizeof(void *));
4217 tr.secctx = t->security_ctx;
4218 if (t->security_ctx) {
4219 cmd = BR_TRANSACTION_SEC_CTX;
4220 trsize = sizeof(tr);
4221 }
4222 if (put_user(cmd, (uint32_t __user *)ptr)) {
4223 if (t_from)
4224 binder_thread_dec_tmpref(t_from);
4226 binder_cleanup_transaction(t, "put_user failed",
4227 BR_FAILED_REPLY);
4229 return -EFAULT;
4230 }
4231 ptr += sizeof(uint32_t);
4232 if (copy_to_user(ptr, &tr, trsize)) {
4233 if (t_from)
4234 binder_thread_dec_tmpref(t_from);
4236 binder_cleanup_transaction(t, "copy_to_user failed",
4237 BR_FAILED_REPLY);
4239 return -EFAULT;
4240 }
4241 ptr += trsize;
4243 trace_binder_transaction_received(t);
4244 binder_stat_br(proc, thread, cmd);
4245 binder_debug(BINDER_DEBUG_TRANSACTION,
4246 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4247 proc->pid, thread->pid,
4248 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4249 (cmd == BR_TRANSACTION_SEC_CTX) ?
4250 "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4251 t->debug_id, t_from ? t_from->proc->pid : 0,
4252 t_from ? t_from->pid : 0, cmd,
4253 t->buffer->data_size, t->buffer->offsets_size,
4254 (u64)trd->data.ptr.buffer,
4255 (u64)trd->data.ptr.offsets);
4257 if (t_from)
4258 binder_thread_dec_tmpref(t_from);
4259 t->buffer->allow_user_free = 1;
4260 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4261 binder_inner_proc_lock(thread->proc);
4262 t->to_parent = thread->transaction_stack;
4263 t->to_thread = thread;
4264 thread->transaction_stack = t;
4265 binder_inner_proc_unlock(thread->proc);
4266 } else {
4267 binder_free_transaction(t);
4268 }
4269 break;
4270 }
4272 done:
4274 *consumed = ptr - buffer;
4275 binder_inner_proc_lock(proc);
4276 if (proc->requested_threads == 0 &&
4277 list_empty(&thread->proc->waiting_threads) &&
4278 proc->requested_threads_started < proc->max_threads &&
4279 (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4280 BINDER_LOOPER_STATE_ENTERED))
4281 /* user space fails to spawn a new thread if we leave this out */) {
4282 proc->requested_threads++;
4283 binder_inner_proc_unlock(proc);
4284 binder_debug(BINDER_DEBUG_THREADS,
4285 "%d:%d BR_SPAWN_LOOPER\n",
4286 proc->pid, thread->pid);
4287 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4288 return -EFAULT;
4289 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4290 } else
4291 binder_inner_proc_unlock(proc);
4292 return 0;
4293 }
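/*
 * A BR_SPAWN_LOOPER returned above is only a request: user space is
 * expected to create a thread and register it with the driver. A minimal
 * user-space sketch of that handshake (illustrative only; "fd" is assumed
 * to be an open binder descriptor):
 *
 *	uint32_t cmd = BC_REGISTER_LOOPER;
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)&cmd,
 *		.write_size = sizeof(cmd),
 *	};
 *	ioctl(fd, BINDER_WRITE_READ, &bwr);
 *
 * That registration is what lets requested_threads_started advance toward
 * max_threads.
 */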
4295 static void binder_release_work(struct binder_proc *proc,
4296 struct list_head *list)
4297 {
4298 struct binder_work *w;
4299 enum binder_work_type wtype;
4301 while (1) {
4302 binder_inner_proc_lock(proc);
4303 w = binder_dequeue_work_head_ilocked(list);
4304 wtype = w ? w->type : 0;
4305 binder_inner_proc_unlock(proc);
4306 if (!w)
4307 return;
4309 switch (wtype) {
4310 case BINDER_WORK_TRANSACTION: {
4311 struct binder_transaction *t;
4313 t = container_of(w, struct binder_transaction, work);
4315 binder_cleanup_transaction(t, "process died.",
4316 BR_DEAD_REPLY);
4317 } break;
4318 case BINDER_WORK_RETURN_ERROR: {
4319 struct binder_error *e = container_of(
4320 w, struct binder_error, work);
4322 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4323 "undelivered TRANSACTION_ERROR: %u\n",
4324 e->cmd);
4325 } break;
4326 case BINDER_WORK_TRANSACTION_COMPLETE: {
4327 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4328 "undelivered TRANSACTION_COMPLETE\n");
4329 kfree(w);
4330 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4331 } break;
4332 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4333 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4334 struct binder_ref_death *death;
4336 death = container_of(w, struct binder_ref_death, work);
4337 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4338 "undelivered death notification, %016llx\n",
4339 (u64)death->cookie);
4340 kfree(death);
4341 binder_stats_deleted(BINDER_STAT_DEATH);
4342 } break;
4343 case BINDER_WORK_NODE:
4344 break;
4345 default:
4346 pr_err("unexpected work type, %d, not freed\n",
4347 wtype);
4348 break;
4349 }
4350 }
4351 }
4354 static struct binder_thread *binder_get_thread_ilocked(
4355 struct binder_proc *proc, struct binder_thread *new_thread)
4356 {
4357 struct binder_thread *thread = NULL;
4358 struct rb_node *parent = NULL;
4359 struct rb_node **p = &proc->threads.rb_node;
4361 while (*p) {
4362 parent = *p;
4363 thread = rb_entry(parent, struct binder_thread, rb_node);
4365 if (current->pid < thread->pid)
4366 p = &(*p)->rb_left;
4367 else if (current->pid > thread->pid)
4368 p = &(*p)->rb_right;
4369 else
4370 return thread;
4371 }
4372 if (!new_thread)
4373 return NULL;
4374 thread = new_thread;
4375 binder_stats_created(BINDER_STAT_THREAD);
4376 thread->proc = proc;
4377 thread->pid = current->pid;
4378 atomic_set(&thread->tmp_ref, 0);
4379 init_waitqueue_head(&thread->wait);
4380 INIT_LIST_HEAD(&thread->todo);
4381 rb_link_node(&thread->rb_node, parent, p);
4382 rb_insert_color(&thread->rb_node, &proc->threads);
4383 thread->looper_need_return = true;
4384 thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4385 thread->return_error.cmd = BR_OK;
4386 thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4387 thread->reply_error.cmd = BR_OK;
4388 INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4389 return thread;
4390 }
4392 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4394 struct binder_thread *thread;
4395 struct binder_thread *new_thread;
4397 binder_inner_proc_lock(proc);
4398 thread = binder_get_thread_ilocked(proc, NULL);
4399 binder_inner_proc_unlock(proc);
4400 if (!thread) {
4401 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4402 if (new_thread == NULL)
4403 return NULL;
4404 binder_inner_proc_lock(proc);
4405 thread = binder_get_thread_ilocked(proc, new_thread);
4406 binder_inner_proc_unlock(proc);
4407 if (thread != new_thread)
4408 kfree(new_thread);
4409 }
4410 return thread;
4411 }
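/*
 * Note the speculative-allocation pattern above: the thread tree may only
 * be searched under the inner lock, so the kzalloc() happens unlocked and
 * a second locked lookup decides whether the new entry is really inserted.
 * If another thread raced us in between, the existing entry is returned
 * and the unused allocation is simply freed.
 */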
4413 static void binder_free_proc(struct binder_proc *proc)
4414 {
4415 struct binder_device *device;
4417 BUG_ON(!list_empty(&proc->todo));
4418 BUG_ON(!list_empty(&proc->delivered_death));
4419 if (proc->outstanding_txns)
4420 pr_warn("%s: Unexpected outstanding_txns %d\n",
4421 __func__, proc->outstanding_txns);
4422 device = container_of(proc->context, struct binder_device, context);
4423 if (refcount_dec_and_test(&device->ref)) {
4424 kfree(proc->context->name);
4425 kfree(device);
4426 }
4427 binder_alloc_deferred_release(&proc->alloc);
4428 put_task_struct(proc->tsk);
4429 put_cred(proc->cred);
4430 binder_stats_deleted(BINDER_STAT_PROC);
4431 kfree(proc);
4432 }
4434 static void binder_free_thread(struct binder_thread *thread)
4435 {
4436 BUG_ON(!list_empty(&thread->todo));
4437 binder_stats_deleted(BINDER_STAT_THREAD);
4438 binder_proc_dec_tmpref(thread->proc);
4439 kfree(thread);
4440 }
4442 static int binder_thread_release(struct binder_proc *proc,
4443 struct binder_thread *thread)
4444 {
4445 struct binder_transaction *t;
4446 struct binder_transaction *send_reply = NULL;
4447 int active_transactions = 0;
4448 struct binder_transaction *last_t = NULL;
4450 binder_inner_proc_lock(thread->proc);
4451 /*
4452 * take a ref on the proc so it survives
4453 * after we remove this thread from proc->threads.
4454 * The corresponding dec is when we actually
4455 * free the thread in binder_free_thread()
4456 */
4457 proc->tmp_ref++;
4458 /*
4459 * take a ref on this thread to ensure it
4460 * survives while we are releasing it
4461 */
4462 atomic_inc(&thread->tmp_ref);
4463 rb_erase(&thread->rb_node, &proc->threads);
4464 t = thread->transaction_stack;
4465 if (t) {
4466 spin_lock(&t->lock);
4467 if (t->to_thread == thread)
4468 send_reply = t;
4469 } else {
4470 __acquire(&t->lock);
4471 }
4472 thread->is_dead = true;
4474 while (t) {
4475 last_t = t;
4476 active_transactions++;
4477 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4478 "release %d:%d transaction %d %s, still active\n",
4479 proc->pid, thread->pid,
4480 t->debug_id,
4481 (t->to_thread == thread) ? "in" : "out");
4483 if (t->to_thread == thread) {
4484 thread->proc->outstanding_txns--;
4485 t->to_proc = NULL;
4486 t->to_thread = NULL;
4487 if (t->buffer) {
4488 t->buffer->transaction = NULL;
4489 t->buffer = NULL;
4490 }
4491 t = t->to_parent;
4492 } else if (t->from == thread) {
4493 t->from = NULL;
4494 t = t->from_parent;
4495 } else
4496 BUG();
4497 spin_unlock(&last_t->lock);
4498 if (t)
4499 spin_lock(&t->lock);
4500 else
4501 __acquire(&t->lock);
4502 }
4503 /* annotation for sparse, lock not acquired in last iteration above */
4504 __release(&t->lock);
4506 /*
4507 * If this thread used poll, make sure we remove the waitqueue from any
4508 * poll data structures holding it.
4509 */
4510 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4511 wake_up_pollfree(&thread->wait);
4513 binder_inner_proc_unlock(thread->proc);
4515 /*
4516 * This is needed to avoid races between wake_up_pollfree() above and
4517 * someone else removing the last entry from the queue for other reasons
4518 * (e.g. ep_remove_wait_queue() being called due to an epoll file
4519 * descriptor being closed). Such other users hold an RCU read lock, so
4520 * we can be sure they're done after we call synchronize_rcu().
4521 */
4522 if (thread->looper & BINDER_LOOPER_STATE_POLL)
4523 synchronize_rcu();
4525 if (send_reply)
4526 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4527 binder_release_work(proc, &thread->todo);
4528 binder_thread_dec_tmpref(thread);
4529 return active_transactions;
4530 }
4532 static __poll_t binder_poll(struct file *filp,
4533 struct poll_table_struct *wait)
4534 {
4535 struct binder_proc *proc = filp->private_data;
4536 struct binder_thread *thread = NULL;
4537 bool wait_for_proc_work;
4539 thread = binder_get_thread(proc);
4540 if (!thread)
4541 return EPOLLERR;
4543 binder_inner_proc_lock(thread->proc);
4544 thread->looper |= BINDER_LOOPER_STATE_POLL;
4545 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4547 binder_inner_proc_unlock(thread->proc);
4549 poll_wait(filp, &thread->wait, wait);
4551 if (binder_has_work(thread, wait_for_proc_work))
4552 return EPOLLIN;
4554 return 0;
4555 }
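/*
 * Example of consuming this from user space with poll(2) (a sketch,
 * assuming "fd" is an open binder descriptor):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *		// work is available: drain it with a BINDER_WRITE_READ
 *		// call whose read_size is non-zero
 *	}
 */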
4557 static int binder_ioctl_write_read(struct file *filp,
4558 unsigned int cmd, unsigned long arg,
4559 struct binder_thread *thread)
4560 {
4561 int ret = 0;
4562 struct binder_proc *proc = filp->private_data;
4563 unsigned int size = _IOC_SIZE(cmd);
4564 void __user *ubuf = (void __user *)arg;
4565 struct binder_write_read bwr;
4567 if (size != sizeof(struct binder_write_read)) {
4568 ret = -EINVAL;
4569 goto out;
4570 }
4571 if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4572 ret = -EFAULT;
4573 goto out;
4574 }
4575 binder_debug(BINDER_DEBUG_READ_WRITE,
4576 "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4577 proc->pid, thread->pid,
4578 (u64)bwr.write_size, (u64)bwr.write_buffer,
4579 (u64)bwr.read_size, (u64)bwr.read_buffer);
4581 if (bwr.write_size > 0) {
4582 ret = binder_thread_write(proc, thread,
4583 bwr.write_buffer,
4584 bwr.write_size,
4585 &bwr.write_consumed);
4586 trace_binder_write_done(ret);
4587 if (ret < 0) {
4588 bwr.read_consumed = 0;
4589 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4590 ret = -EFAULT;
4591 goto out;
4592 }
4593 }
4594 if (bwr.read_size > 0) {
4595 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4596 bwr.read_size,
4597 &bwr.read_consumed,
4598 filp->f_flags & O_NONBLOCK);
4599 trace_binder_read_done(ret);
4600 binder_inner_proc_lock(proc);
4601 if (!binder_worklist_empty_ilocked(&proc->todo))
4602 binder_wakeup_proc_ilocked(proc);
4603 binder_inner_proc_unlock(proc);
4604 if (ret < 0) {
4605 if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4606 ret = -EFAULT;
4607 goto out;
4608 }
4609 }
4610 binder_debug(BINDER_DEBUG_READ_WRITE,
4611 "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4612 proc->pid, thread->pid,
4613 (u64)bwr.write_consumed, (u64)bwr.write_size,
4614 (u64)bwr.read_consumed, (u64)bwr.read_size);
4615 if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4616 ret = -EFAULT;
4617 goto out;
4618 }
4619 out:
4620 return ret;
4621 }
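/*
 * Example of driving this ioctl from user space (a sketch, not part of
 * the driver; "fd" is an open binder descriptor and "cmds"/"cmds_len" a
 * caller-built buffer of BC_* commands):
 *
 *	char readbuf[256];
 *	struct binder_write_read bwr = {
 *		.write_buffer = (binder_uintptr_t)cmds,
 *		.write_size = cmds_len,
 *		.read_buffer = (binder_uintptr_t)readbuf,
 *		.read_size = sizeof(readbuf),
 *	};
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *	// bwr.write_consumed and bwr.read_consumed report progress,
 *	// mirroring the *consumed updates made by the two helpers above.
 */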
4623 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4624 struct flat_binder_object *fbo)
4625 {
4626 int ret = 0;
4627 struct binder_proc *proc = filp->private_data;
4628 struct binder_context *context = proc->context;
4629 struct binder_node *new_node;
4630 kuid_t curr_euid = current_euid();
4632 mutex_lock(&context->context_mgr_node_lock);
4633 if (context->binder_context_mgr_node) {
4634 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4635 ret = -EBUSY;
4636 goto out;
4637 }
4638 ret = security_binder_set_context_mgr(proc->cred);
4639 if (ret < 0)
4640 goto out;
4641 if (uid_valid(context->binder_context_mgr_uid)) {
4642 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4643 pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4644 from_kuid(&init_user_ns, curr_euid),
4645 from_kuid(&init_user_ns,
4646 context->binder_context_mgr_uid));
4647 ret = -EPERM;
4648 goto out;
4649 }
4650 } else {
4651 context->binder_context_mgr_uid = curr_euid;
4652 }
4653 new_node = binder_new_node(proc, fbo);
4654 if (!new_node) {
4655 ret = -ENOMEM;
4656 goto out;
4657 }
4658 binder_node_lock(new_node);
4659 new_node->local_weak_refs++;
4660 new_node->local_strong_refs++;
4661 new_node->has_strong_ref = 1;
4662 new_node->has_weak_ref = 1;
4663 context->binder_context_mgr_node = new_node;
4664 binder_node_unlock(new_node);
4665 binder_put_node(new_node);
4666 out:
4667 mutex_unlock(&context->context_mgr_node_lock);
4668 return ret;
4669 }
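/*
 * Example: how a service manager claims the context-manager role from
 * user space (a sketch; Android's servicemanager typically uses the _EXT
 * variant with a populated flat_binder_object):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		perror("BINDER_SET_CONTEXT_MGR");
 *
 * A second caller on the same context gets -EBUSY from the check above.
 */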
4671 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4672 struct binder_node_info_for_ref *info)
4673 {
4674 struct binder_node *node;
4675 struct binder_context *context = proc->context;
4676 __u32 handle = info->handle;
4678 if (info->strong_count || info->weak_count || info->reserved1 ||
4679 info->reserved2 || info->reserved3) {
4680 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4681 proc->pid);
4682 return -EINVAL;
4683 }
4685 /* This ioctl may only be used by the context manager */
4686 mutex_lock(&context->context_mgr_node_lock);
4687 if (!context->binder_context_mgr_node ||
4688 context->binder_context_mgr_node->proc != proc) {
4689 mutex_unlock(&context->context_mgr_node_lock);
4690 return -EPERM;
4691 }
4692 mutex_unlock(&context->context_mgr_node_lock);
4694 node = binder_get_node_from_ref(proc, handle, true, NULL);
4695 if (!node)
4696 return -EINVAL;
4698 info->strong_count = node->local_strong_refs +
4699 node->internal_strong_refs;
4700 info->weak_count = node->local_weak_refs;
4702 binder_put_node(node);
4704 return 0;
4705 }
4707 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4708 struct binder_node_debug_info *info)
4709 {
4710 struct rb_node *n;
4711 binder_uintptr_t ptr = info->ptr;
4713 memset(info, 0, sizeof(*info));
4715 binder_inner_proc_lock(proc);
4716 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4717 struct binder_node *node = rb_entry(n, struct binder_node,
4718 rb_node);
4719 if (node->ptr > ptr) {
4720 info->ptr = node->ptr;
4721 info->cookie = node->cookie;
4722 info->has_strong_ref = node->has_strong_ref;
4723 info->has_weak_ref = node->has_weak_ref;
4724 break;
4725 }
4726 }
4727 binder_inner_proc_unlock(proc);
4729 return 0;
4730 }
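/*
 * Because this returns the first node whose ptr is strictly greater than
 * the one passed in, user space can enumerate every node of its process
 * by feeding each result back in (a sketch; "fd" is a binder descriptor):
 *
 *	struct binder_node_debug_info info = { .ptr = 0 };
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *	} while (info.ptr != 0);
 */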
4732 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4733 {
4734 struct rb_node *n;
4735 struct binder_thread *thread;
4737 if (proc->outstanding_txns > 0)
4738 return true;
4740 for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4741 thread = rb_entry(n, struct binder_thread, rb_node);
4742 if (thread->transaction_stack)
4743 return true;
4744 }
4745 return false;
4746 }
4748 static int binder_ioctl_freeze(struct binder_freeze_info *info,
4749 struct binder_proc *target_proc)
4750 {
4751 int ret = 0;
4753 if (!info->enable) {
4754 binder_inner_proc_lock(target_proc);
4755 target_proc->sync_recv = false;
4756 target_proc->async_recv = false;
4757 target_proc->is_frozen = false;
4758 binder_inner_proc_unlock(target_proc);
4759 return 0;
4760 }
4762 /*
4763 * Freezing the target. Prevent new transactions by
4764 * setting frozen state. If timeout specified, wait
4765 * for transactions to drain.
4766 */
4767 binder_inner_proc_lock(target_proc);
4768 target_proc->sync_recv = false;
4769 target_proc->async_recv = false;
4770 target_proc->is_frozen = true;
4771 binder_inner_proc_unlock(target_proc);
4773 if (info->timeout_ms > 0)
4774 ret = wait_event_interruptible_timeout(
4775 target_proc->freeze_wait,
4776 (!target_proc->outstanding_txns),
4777 msecs_to_jiffies(info->timeout_ms));
4779 /* Check pending transactions that wait for reply */
4780 if (ret >= 0) {
4781 binder_inner_proc_lock(target_proc);
4782 if (binder_txns_pending_ilocked(target_proc))
4783 ret = -EAGAIN;
4784 binder_inner_proc_unlock(target_proc);
4785 }
4787 if (ret < 0) {
4788 binder_inner_proc_lock(target_proc);
4789 target_proc->is_frozen = false;
4790 binder_inner_proc_unlock(target_proc);
4791 }
4793 return ret;
4794 }
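/*
 * Example of a freeze request from user space (a sketch; on Android this
 * is driven by the framework's freezer, and target_pid is an assumption
 * chosen by the caller):
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *	if (ioctl(binder_fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN) {
 *		// transactions still pending; retry later or thaw
 *	}
 */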
4796 static int binder_ioctl_get_freezer_info(
4797 struct binder_frozen_status_info *info)
4798 {
4799 struct binder_proc *target_proc;
4800 bool found = false;
4801 __u32 txns_pending;
4803 info->sync_recv = 0;
4804 info->async_recv = 0;
4806 mutex_lock(&binder_procs_lock);
4807 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4808 if (target_proc->pid == info->pid) {
4809 found = true;
4810 binder_inner_proc_lock(target_proc);
4811 txns_pending = binder_txns_pending_ilocked(target_proc);
4812 info->sync_recv |= target_proc->sync_recv |
4813 (txns_pending << 1);
4814 info->async_recv |= target_proc->async_recv;
4815 binder_inner_proc_unlock(target_proc);
4816 }
4817 }
4818 mutex_unlock(&binder_procs_lock);
4820 if (!found)
4821 return -EINVAL;
4823 return 0;
4824 }
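/*
 * Note the bit-packing above: bit 0 of sync_recv records that a sync
 * transaction was attempted while frozen, bit 1 that transactions are
 * still pending. A user-space decode sketch:
 *
 *	struct binder_frozen_status_info info = { .pid = target_pid };
 *	if (ioctl(binder_fd, BINDER_GET_FROZEN_INFO, &info) == 0) {
 *		bool sync_received = info.sync_recv & 1;
 *		bool txns_pending = info.sync_recv >> 1;
 *	}
 */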
4826 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4827 {
4828 int ret;
4829 struct binder_proc *proc = filp->private_data;
4830 struct binder_thread *thread;
4831 unsigned int size = _IOC_SIZE(cmd);
4832 void __user *ubuf = (void __user *)arg;
4834 /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4835 proc->pid, current->pid, cmd, arg);*/
4837 binder_selftest_alloc(&proc->alloc);
4839 trace_binder_ioctl(cmd, arg);
4841 ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4842 if (ret)
4843 goto err_unlocked;
4845 thread = binder_get_thread(proc);
4846 if (thread == NULL) {
4847 ret = -ENOMEM;
4848 goto err;
4849 }
4851 switch (cmd) {
4852 case BINDER_WRITE_READ:
4853 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4854 if (ret)
4855 goto err;
4856 break;
4857 case BINDER_SET_MAX_THREADS: {
4858 int max_threads;
4860 if (copy_from_user(&max_threads, ubuf,
4861 sizeof(max_threads))) {
4862 ret = -EINVAL;
4863 goto err;
4864 }
4865 binder_inner_proc_lock(proc);
4866 proc->max_threads = max_threads;
4867 binder_inner_proc_unlock(proc);
4868 break;
4869 }
4870 case BINDER_SET_CONTEXT_MGR_EXT: {
4871 struct flat_binder_object fbo;
4873 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4874 ret = -EINVAL;
4875 goto err;
4876 }
4877 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4878 if (ret)
4879 goto err;
4880 break;
4881 }
4882 case BINDER_SET_CONTEXT_MGR:
4883 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4884 if (ret)
4885 goto err;
4886 break;
4887 case BINDER_THREAD_EXIT:
4888 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4889 proc->pid, thread->pid);
4890 binder_thread_release(proc, thread);
4891 thread = NULL;
4892 break;
4893 case BINDER_VERSION: {
4894 struct binder_version __user *ver = ubuf;
4896 if (size != sizeof(struct binder_version)) {
4897 ret = -EINVAL;
4898 goto err;
4899 }
4900 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4901 &ver->protocol_version)) {
4902 ret = -EINVAL;
4903 goto err;
4904 }
4905 break;
4906 }
4907 case BINDER_GET_NODE_INFO_FOR_REF: {
4908 struct binder_node_info_for_ref info;
4910 if (copy_from_user(&info, ubuf, sizeof(info))) {
4911 ret = -EFAULT;
4912 goto err;
4913 }
4915 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4916 if (ret < 0)
4917 goto err;
4919 if (copy_to_user(ubuf, &info, sizeof(info))) {
4920 ret = -EFAULT;
4921 goto err;
4922 }
4924 break;
4925 }
4926 case BINDER_GET_NODE_DEBUG_INFO: {
4927 struct binder_node_debug_info info;
4929 if (copy_from_user(&info, ubuf, sizeof(info))) {
4930 ret = -EFAULT;
4931 goto err;
4932 }
4934 ret = binder_ioctl_get_node_debug_info(proc, &info);
4935 if (ret < 0)
4936 goto err;
4938 if (copy_to_user(ubuf, &info, sizeof(info))) {
4939 ret = -EFAULT;
4940 goto err;
4941 }
4942 break;
4943 }
4944 case BINDER_FREEZE: {
4945 struct binder_freeze_info info;
4946 struct binder_proc **target_procs = NULL, *target_proc;
4947 int target_procs_count = 0, i = 0;
4949 ret = 0;
4951 if (copy_from_user(&info, ubuf, sizeof(info))) {
4952 ret = -EFAULT;
4953 goto err;
4954 }
4956 mutex_lock(&binder_procs_lock);
4957 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4958 if (target_proc->pid == info.pid)
4959 target_procs_count++;
4960 }
4962 if (target_procs_count == 0) {
4963 mutex_unlock(&binder_procs_lock);
4964 ret = -EINVAL;
4965 goto err;
4966 }
4968 target_procs = kcalloc(target_procs_count,
4969 sizeof(struct binder_proc *),
4970 GFP_KERNEL);
4972 if (!target_procs) {
4973 mutex_unlock(&binder_procs_lock);
4974 ret = -ENOMEM;
4975 goto err;
4976 }
4978 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4979 if (target_proc->pid != info.pid)
4980 continue;
4982 binder_inner_proc_lock(target_proc);
4983 target_proc->tmp_ref++;
4984 binder_inner_proc_unlock(target_proc);
4986 target_procs[i++] = target_proc;
4987 }
4988 mutex_unlock(&binder_procs_lock);
4990 for (i = 0; i < target_procs_count; i++) {
4991 if (ret >= 0)
4992 ret = binder_ioctl_freeze(&info,
4993 target_procs[i]);
4995 binder_proc_dec_tmpref(target_procs[i]);
4996 }
4998 kfree(target_procs);
5000 if (ret < 0)
5001 goto err;
5002 break;
5003 }
5004 case BINDER_GET_FROZEN_INFO: {
5005 struct binder_frozen_status_info info;
5007 if (copy_from_user(&info, ubuf, sizeof(info))) {
5008 ret = -EFAULT;
5009 goto err;
5010 }
5012 ret = binder_ioctl_get_freezer_info(&info);
5013 if (ret < 0)
5014 goto err;
5016 if (copy_to_user(ubuf, &info, sizeof(info))) {
5017 ret = -EFAULT;
5018 goto err;
5019 }
5020 break;
5021 }
5022 case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5023 uint32_t enable;
5025 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5026 ret = -EFAULT;
5027 goto err;
5028 }
5029 binder_inner_proc_lock(proc);
5030 proc->oneway_spam_detection_enabled = (bool)enable;
5031 binder_inner_proc_unlock(proc);
5032 break;
5033 }
5034 default:
5035 ret = -EINVAL;
5036 goto err;
5037 }
5038 ret = 0;
5039 err:
5040 if (thread)
5041 thread->looper_need_return = false;
5042 wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5043 if (ret && ret != -EINTR)
5044 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5045 err_unlocked:
5046 trace_binder_ioctl_done(ret);
5047 return ret;
5048 }
5050 static void binder_vma_open(struct vm_area_struct *vma)
5051 {
5052 struct binder_proc *proc = vma->vm_private_data;
5054 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5055 "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5056 proc->pid, vma->vm_start, vma->vm_end,
5057 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5058 (unsigned long)pgprot_val(vma->vm_page_prot));
5059 }
5061 static void binder_vma_close(struct vm_area_struct *vma)
5062 {
5063 struct binder_proc *proc = vma->vm_private_data;
5065 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5066 "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5067 proc->pid, vma->vm_start, vma->vm_end,
5068 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5069 (unsigned long)pgprot_val(vma->vm_page_prot));
5070 binder_alloc_vma_close(&proc->alloc);
5071 }
5073 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5074 {
5075 return VM_FAULT_SIGBUS;
5076 }
5078 static const struct vm_operations_struct binder_vm_ops = {
5079 .open = binder_vma_open,
5080 .close = binder_vma_close,
5081 .fault = binder_vm_fault,
5084 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5085 {
5086 struct binder_proc *proc = filp->private_data;
5088 if (proc->tsk != current->group_leader)
5089 return -EINVAL;
5091 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5092 "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5093 __func__, proc->pid, vma->vm_start, vma->vm_end,
5094 (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5095 (unsigned long)pgprot_val(vma->vm_page_prot));
5097 if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5098 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5099 proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5100 return -EPERM;
5101 }
5102 vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5103 vma->vm_flags &= ~VM_MAYWRITE;
5105 vma->vm_ops = &binder_vm_ops;
5106 vma->vm_private_data = proc;
5108 return binder_alloc_mmap_handler(&proc->alloc, vma);
5109 }
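/*
 * Example: the canonical user-space setup pairs this with open(2) (a
 * sketch; the 1 MiB size is an assumption, real libbinder sizes vary):
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 * Asking for PROT_WRITE would trip the FORBIDDEN_MMAP_FLAGS check above;
 * the kernel fills this area with transaction data itself.
 */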
5111 static int binder_open(struct inode *nodp, struct file *filp)
5112 {
5113 struct binder_proc *proc, *itr;
5114 struct binder_device *binder_dev;
5115 struct binderfs_info *info;
5116 struct dentry *binder_binderfs_dir_entry_proc = NULL;
5117 bool existing_pid = false;
5119 binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5120 current->group_leader->pid, current->pid);
5122 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5123 if (proc == NULL)
5124 return -ENOMEM;
5125 spin_lock_init(&proc->inner_lock);
5126 spin_lock_init(&proc->outer_lock);
5127 get_task_struct(current->group_leader);
5128 proc->tsk = current->group_leader;
5129 proc->cred = get_cred(filp->f_cred);
5130 INIT_LIST_HEAD(&proc->todo);
5131 init_waitqueue_head(&proc->freeze_wait);
5132 proc->default_priority = task_nice(current);
5133 /* binderfs stashes devices in i_private */
5134 if (is_binderfs_device(nodp)) {
5135 binder_dev = nodp->i_private;
5136 info = nodp->i_sb->s_fs_info;
5137 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5138 } else {
5139 binder_dev = container_of(filp->private_data,
5140 struct binder_device, miscdev);
5141 }
5142 refcount_inc(&binder_dev->ref);
5143 proc->context = &binder_dev->context;
5144 binder_alloc_init(&proc->alloc);
5146 binder_stats_created(BINDER_STAT_PROC);
5147 proc->pid = current->group_leader->pid;
5148 INIT_LIST_HEAD(&proc->delivered_death);
5149 INIT_LIST_HEAD(&proc->waiting_threads);
5150 filp->private_data = proc;
5152 mutex_lock(&binder_procs_lock);
5153 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5154 if (itr->pid == proc->pid) {
5155 existing_pid = true;
5156 break;
5157 }
5158 }
5159 hlist_add_head(&proc->proc_node, &binder_procs);
5160 mutex_unlock(&binder_procs_lock);
5162 if (binder_debugfs_dir_entry_proc && !existing_pid) {
5163 char strbuf[11];
5165 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5166 /*
5167 * proc debug entries are shared between contexts.
5168 * Only create for the first PID to avoid debugfs log spamming
5169 * The printing code will anyway print all contexts for a given
5170 * PID so this is not a problem.
5171 */
5172 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5173 binder_debugfs_dir_entry_proc,
5174 (void *)(unsigned long)proc->pid,
5175 &proc_fops);
5176 }
5178 if (binder_binderfs_dir_entry_proc && !existing_pid) {
5179 char strbuf[11];
5180 struct dentry *binderfs_entry;
5182 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5183 /*
5184 * Similar to debugfs, the process specific log file is shared
5185 * between contexts. Only create for the first PID.
5186 * This is ok since same as debugfs, the log file will contain
5187 * information on all contexts of a given PID.
5188 */
5189 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5190 strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5191 if (!IS_ERR(binderfs_entry)) {
5192 proc->binderfs_entry = binderfs_entry;
5193 } else {
5194 int error;
5196 error = PTR_ERR(binderfs_entry);
5197 pr_warn("Unable to create file %s in binderfs (error %d)\n",
5198 strbuf, error);
5199 }
5200 }
5202 return 0;
5203 }
5205 static int binder_flush(struct file *filp, fl_owner_t id)
5206 {
5207 struct binder_proc *proc = filp->private_data;
5209 binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5211 return 0;
5212 }
5214 static void binder_deferred_flush(struct binder_proc *proc)
5215 {
5216 struct rb_node *n;
5217 int wake_count = 0;
5219 binder_inner_proc_lock(proc);
5220 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5221 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5223 thread->looper_need_return = true;
5224 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5225 wake_up_interruptible(&thread->wait);
5226 wake_count++;
5227 }
5228 }
5229 binder_inner_proc_unlock(proc);
5231 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5232 "binder_flush: %d woke %d threads\n", proc->pid,
5236 static int binder_release(struct inode *nodp, struct file *filp)
5237 {
5238 struct binder_proc *proc = filp->private_data;
5240 debugfs_remove(proc->debugfs_entry);
5242 if (proc->binderfs_entry) {
5243 binderfs_remove_file(proc->binderfs_entry);
5244 proc->binderfs_entry = NULL;
5245 }
5247 binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5249 return 0;
5250 }
5252 static int binder_node_release(struct binder_node *node, int refs)
5253 {
5254 struct binder_ref *ref;
5255 int death = 0;
5256 struct binder_proc *proc = node->proc;
5258 binder_release_work(proc, &node->async_todo);
5260 binder_node_lock(node);
5261 binder_inner_proc_lock(proc);
5262 binder_dequeue_work_ilocked(&node->work);
5263 /*
5264 * The caller must have taken a temporary ref on the node,
5265 */
5266 BUG_ON(!node->tmp_refs);
5267 if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5268 binder_inner_proc_unlock(proc);
5269 binder_node_unlock(node);
5270 binder_free_node(node);
5272 return refs;
5273 }
5275 node->proc = NULL;
5276 node->local_strong_refs = 0;
5277 node->local_weak_refs = 0;
5278 binder_inner_proc_unlock(proc);
5280 spin_lock(&binder_dead_nodes_lock);
5281 hlist_add_head(&node->dead_node, &binder_dead_nodes);
5282 spin_unlock(&binder_dead_nodes_lock);
5284 hlist_for_each_entry(ref, &node->refs, node_entry) {
5285 refs++;
5286 /*
5287 * Need the node lock to synchronize
5288 * with new notification requests and the
5289 * inner lock to synchronize with queued
5290 * death notifications.
5291 */
5292 binder_inner_proc_lock(ref->proc);
5293 if (!ref->death) {
5294 binder_inner_proc_unlock(ref->proc);
5295 continue;
5296 }
5298 death++;
5300 BUG_ON(!list_empty(&ref->death->work.entry));
5301 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5302 binder_enqueue_work_ilocked(&ref->death->work,
5303 &ref->proc->todo);
5304 binder_wakeup_proc_ilocked(ref->proc);
5305 binder_inner_proc_unlock(ref->proc);
5306 }
5308 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5309 "node %d now dead, refs %d, death %d\n",
5310 node->debug_id, refs, death);
5311 binder_node_unlock(node);
5312 binder_put_node(node);
5314 return refs;
5315 }
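/*
 * At this point every ref that registered a death notification has had
 * its BINDER_WORK_DEAD_BINDER item queued on the owning proc's todo list;
 * those processes will wake up and receive BR_DEAD_BINDER from
 * binder_thread_read().
 */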
5317 static void binder_deferred_release(struct binder_proc *proc)
5318 {
5319 struct binder_context *context = proc->context;
5320 struct rb_node *n;
5321 int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5323 mutex_lock(&binder_procs_lock);
5324 hlist_del(&proc->proc_node);
5325 mutex_unlock(&binder_procs_lock);
5327 mutex_lock(&context->context_mgr_node_lock);
5328 if (context->binder_context_mgr_node &&
5329 context->binder_context_mgr_node->proc == proc) {
5330 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5331 "%s: %d context_mgr_node gone\n",
5332 __func__, proc->pid);
5333 context->binder_context_mgr_node = NULL;
5335 mutex_unlock(&context->context_mgr_node_lock);
5336 binder_inner_proc_lock(proc);
5337 /*
5338 * Make sure proc stays alive after we
5339 * remove all the threads
5340 */
5341 proc->tmp_ref++;
5343 proc->is_dead = true;
5344 proc->is_frozen = false;
5345 proc->sync_recv = false;
5346 proc->async_recv = false;
5347 threads = 0;
5348 active_transactions = 0;
5349 while ((n = rb_first(&proc->threads))) {
5350 struct binder_thread *thread;
5352 thread = rb_entry(n, struct binder_thread, rb_node);
5353 binder_inner_proc_unlock(proc);
5354 threads++;
5355 active_transactions += binder_thread_release(proc, thread);
5356 binder_inner_proc_lock(proc);
5357 }
5359 nodes = 0;
5360 incoming_refs = 0;
5361 while ((n = rb_first(&proc->nodes))) {
5362 struct binder_node *node;
5364 node = rb_entry(n, struct binder_node, rb_node);
5365 nodes++;
5366 /*
5367 * take a temporary ref on the node before
5368 * calling binder_node_release() which will either
5369 * kfree() the node or call binder_put_node()
5370 */
5371 binder_inc_node_tmpref_ilocked(node);
5372 rb_erase(&node->rb_node, &proc->nodes);
5373 binder_inner_proc_unlock(proc);
5374 incoming_refs = binder_node_release(node, incoming_refs);
5375 binder_inner_proc_lock(proc);
5376 }
5377 binder_inner_proc_unlock(proc);
5379 outgoing_refs = 0;
5380 binder_proc_lock(proc);
5381 while ((n = rb_first(&proc->refs_by_desc))) {
5382 struct binder_ref *ref;
5384 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5385 outgoing_refs++;
5386 binder_cleanup_ref_olocked(ref);
5387 binder_proc_unlock(proc);
5388 binder_free_ref(ref);
5389 binder_proc_lock(proc);
5390 }
5391 binder_proc_unlock(proc);
5393 binder_release_work(proc, &proc->todo);
5394 binder_release_work(proc, &proc->delivered_death);
5396 binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5397 "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5398 __func__, proc->pid, threads, nodes, incoming_refs,
5399 outgoing_refs, active_transactions);
5401 binder_proc_dec_tmpref(proc);
5402 }
5404 static void binder_deferred_func(struct work_struct *work)
5405 {
5406 struct binder_proc *proc;
5408 int defer;
5410 do {
5411 mutex_lock(&binder_deferred_lock);
5412 if (!hlist_empty(&binder_deferred_list)) {
5413 proc = hlist_entry(binder_deferred_list.first,
5414 struct binder_proc, deferred_work_node);
5415 hlist_del_init(&proc->deferred_work_node);
5416 defer = proc->deferred_work;
5417 proc->deferred_work = 0;
5418 } else {
5419 proc = NULL;
5420 defer = 0;
5421 }
5422 mutex_unlock(&binder_deferred_lock);
5424 if (defer & BINDER_DEFERRED_FLUSH)
5425 binder_deferred_flush(proc);
5427 if (defer & BINDER_DEFERRED_RELEASE)
5428 binder_deferred_release(proc); /* frees proc */
5429 } while (proc);
5430 }
5431 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5433 static void
5434 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5435 {
5436 mutex_lock(&binder_deferred_lock);
5437 proc->deferred_work |= defer;
5438 if (hlist_unhashed(&proc->deferred_work_node)) {
5439 hlist_add_head(&proc->deferred_work_node,
5440 &binder_deferred_list);
5441 schedule_work(&binder_deferred_work);
5442 }
5443 mutex_unlock(&binder_deferred_lock);
5444 }
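/*
 * Deferred work is coalesced: the flags accumulate in proc->deferred_work
 * and the proc is queued at most once, so a flush racing with a release
 * still results in a single binder_deferred_func() pass that handles both
 * bits.
 */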
5446 static void print_binder_transaction_ilocked(struct seq_file *m,
5447 struct binder_proc *proc,
5448 const char *prefix,
5449 struct binder_transaction *t)
5450 {
5451 struct binder_proc *to_proc;
5452 struct binder_buffer *buffer = t->buffer;
5454 spin_lock(&t->lock);
5455 to_proc = t->to_proc;
5456 seq_printf(m,
5457 "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5458 prefix, t->debug_id, t,
5459 t->from ? t->from->proc->pid : 0,
5460 t->from ? t->from->pid : 0,
5461 to_proc ? to_proc->pid : 0,
5462 t->to_thread ? t->to_thread->pid : 0,
5463 t->code, t->flags, t->priority, t->need_reply);
5464 spin_unlock(&t->lock);
5466 if (proc != to_proc) {
5467 /*
5468 * Can only safely deref buffer if we are holding the
5469 * correct proc inner lock for this node
5470 */
5471 seq_puts(m, "\n");
5472 return;
5473 }
5475 if (buffer == NULL) {
5476 seq_puts(m, " buffer free\n");
5477 return;
5478 }
5479 if (buffer->target_node)
5480 seq_printf(m, " node %d", buffer->target_node->debug_id);
5481 seq_printf(m, " size %zd:%zd data %pK\n",
5482 buffer->data_size, buffer->offsets_size,
5483 buffer->user_data);
5484 }
5486 static void print_binder_work_ilocked(struct seq_file *m,
5487 struct binder_proc *proc,
5488 const char *prefix,
5489 const char *transaction_prefix,
5490 struct binder_work *w)
5491 {
5492 struct binder_node *node;
5493 struct binder_transaction *t;
5495 switch (w->type) {
5496 case BINDER_WORK_TRANSACTION:
5497 t = container_of(w, struct binder_transaction, work);
5498 print_binder_transaction_ilocked(
5499 m, proc, transaction_prefix, t);
5500 break;
5501 case BINDER_WORK_RETURN_ERROR: {
5502 struct binder_error *e = container_of(
5503 w, struct binder_error, work);
5505 seq_printf(m, "%stransaction error: %u\n",
5508 case BINDER_WORK_TRANSACTION_COMPLETE:
5509 seq_printf(m, "%stransaction complete\n", prefix);
5511 case BINDER_WORK_NODE:
5512 node = container_of(w, struct binder_node, work);
5513 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5514 prefix, node->debug_id,
5515 (u64)node->ptr, (u64)node->cookie);
5516 break;
5517 case BINDER_WORK_DEAD_BINDER:
5518 seq_printf(m, "%shas dead binder\n", prefix);
5519 break;
5520 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5521 seq_printf(m, "%shas cleared dead binder\n", prefix);
5522 break;
5523 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5524 seq_printf(m, "%shas cleared death notification\n", prefix);
5525 break;
5526 default:
5527 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5528 break;
5529 }
5530 }
5532 static void print_binder_thread_ilocked(struct seq_file *m,
5533 struct binder_thread *thread,
5534 int print_always)
5535 {
5536 struct binder_transaction *t;
5537 struct binder_work *w;
5538 size_t start_pos = m->count;
5539 size_t header_pos;
5541 seq_printf(m, " thread %d: l %02x need_return %d tr %d\n",
5542 thread->pid, thread->looper,
5543 thread->looper_need_return,
5544 atomic_read(&thread->tmp_ref));
5545 header_pos = m->count;
5546 t = thread->transaction_stack;
5547 while (t) {
5548 if (t->from == thread) {
5549 print_binder_transaction_ilocked(m, thread->proc,
5550 " outgoing transaction", t);
5551 t = t->from_parent;
5552 } else if (t->to_thread == thread) {
5553 print_binder_transaction_ilocked(m, thread->proc,
5554 " incoming transaction", t);
5555 t = t->to_parent;
5556 } else {
5557 print_binder_transaction_ilocked(m, thread->proc,
5558 " bad transaction", t);
5559 t = NULL;
5560 }
5561 }
5562 list_for_each_entry(w, &thread->todo, entry) {
5563 print_binder_work_ilocked(m, thread->proc, " ",
5564 " pending transaction", w);
5566 if (!print_always && m->count == header_pos)
5567 m->count = start_pos;
5568 }
5570 static void print_binder_node_nilocked(struct seq_file *m,
5571 struct binder_node *node)
5572 {
5573 struct binder_ref *ref;
5574 struct binder_work *w;
5575 int count;
5577 count = 0;
5578 hlist_for_each_entry(ref, &node->refs, node_entry)
5579 count++;
5581 seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5582 node->debug_id, (u64)node->ptr, (u64)node->cookie,
5583 node->has_strong_ref, node->has_weak_ref,
5584 node->local_strong_refs, node->local_weak_refs,
5585 node->internal_strong_refs, count, node->tmp_refs);
5586 if (count) {
5587 seq_puts(m, " proc");
5588 hlist_for_each_entry(ref, &node->refs, node_entry)
5589 seq_printf(m, " %d", ref->proc->pid);
5590 }
5591 seq_puts(m, "\n");
5592 if (node->proc) {
5593 list_for_each_entry(w, &node->async_todo, entry)
5594 print_binder_work_ilocked(m, node->proc, " ",
5595 " pending async transaction", w);
5599 static void print_binder_ref_olocked(struct seq_file *m,
5600 struct binder_ref *ref)
5601 {
5602 binder_node_lock(ref->node);
5603 seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n",
5604 ref->data.debug_id, ref->data.desc,
5605 ref->node->proc ? "" : "dead ",
5606 ref->node->debug_id, ref->data.strong,
5607 ref->data.weak, ref->death);
5608 binder_node_unlock(ref->node);
5611 static void print_binder_proc(struct seq_file *m,
5612 struct binder_proc *proc, int print_all)
5614 struct binder_work *w;
5615 struct rb_node *n;
5616 size_t start_pos = m->count;
5617 size_t header_pos;
5618 struct binder_node *last_node = NULL;
5620 seq_printf(m, "proc %d\n", proc->pid);
5621 seq_printf(m, "context %s\n", proc->context->name);
5622 header_pos = m->count;
5624 binder_inner_proc_lock(proc);
5625 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5626 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5627 rb_node), print_all);
5629 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5630 struct binder_node *node = rb_entry(n, struct binder_node,
5631 rb_node);
5632 if (!print_all && !node->has_async_transaction)
5633 continue;
5635 /*
5636 * take a temporary reference on the node so it
5637 * survives and isn't removed from the tree
5638 * while we print it.
5639 */
5640 binder_inc_node_tmpref_ilocked(node);
5641 /* Need to drop inner lock to take node lock */
5642 binder_inner_proc_unlock(proc);
5643 if (last_node)
5644 binder_put_node(last_node);
5645 binder_node_inner_lock(node);
5646 print_binder_node_nilocked(m, node);
5647 binder_node_inner_unlock(node);
5648 last_node = node;
5649 binder_inner_proc_lock(proc);
5650 }
5651 binder_inner_proc_unlock(proc);
5652 if (last_node)
5653 binder_put_node(last_node);
5655 if (print_all) {
5656 binder_proc_lock(proc);
5657 for (n = rb_first(&proc->refs_by_desc);
5658 n != NULL;
5659 n = rb_next(n))
5660 print_binder_ref_olocked(m, rb_entry(n,
5661 struct binder_ref,
5662 rb_node_desc));
5663 binder_proc_unlock(proc);
5664 }
5665 binder_alloc_print_allocated(m, &proc->alloc);
5666 binder_inner_proc_lock(proc);
5667 list_for_each_entry(w, &proc->todo, entry)
5668 print_binder_work_ilocked(m, proc, " ",
5669 " pending transaction", w);
5670 list_for_each_entry(w, &proc->delivered_death, entry) {
5671 seq_puts(m, " has delivered dead binder\n");
5674 binder_inner_proc_unlock(proc);
5675 if (!print_all && m->count == header_pos)
5676 m->count = start_pos;
5677 }
5679 static const char * const binder_return_strings[] = {
5680 "BR_ERROR",
5681 "BR_OK",
5682 "BR_TRANSACTION",
5683 "BR_REPLY",
5684 "BR_ACQUIRE_RESULT",
5685 "BR_DEAD_REPLY",
5686 "BR_TRANSACTION_COMPLETE",
5687 "BR_INCREFS",
5688 "BR_ACQUIRE",
5689 "BR_RELEASE",
5690 "BR_DECREFS",
5691 "BR_ATTEMPT_ACQUIRE",
5692 "BR_NOOP",
5693 "BR_SPAWN_LOOPER",
5694 "BR_FINISHED",
5695 "BR_DEAD_BINDER",
5696 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5697 "BR_FAILED_REPLY",
5698 "BR_FROZEN_REPLY",
5699 "BR_ONEWAY_SPAM_SUSPECT",
5700 };
5702 static const char * const binder_command_strings[] = {
5703 "BC_TRANSACTION",
5704 "BC_REPLY",
5705 "BC_ACQUIRE_RESULT",
5706 "BC_FREE_BUFFER",
5707 "BC_INCREFS",
5708 "BC_ACQUIRE",
5709 "BC_RELEASE",
5710 "BC_DECREFS",
5711 "BC_INCREFS_DONE",
5712 "BC_ACQUIRE_DONE",
5713 "BC_ATTEMPT_ACQUIRE",
5714 "BC_REGISTER_LOOPER",
5715 "BC_ENTER_LOOPER",
5716 "BC_EXIT_LOOPER",
5717 "BC_REQUEST_DEATH_NOTIFICATION",
5718 "BC_CLEAR_DEATH_NOTIFICATION",
5719 "BC_DEAD_BINDER_DONE",
5720 "BC_TRANSACTION_SG",
5721 "BC_REPLY_SG",
5722 };
5724 static const char * const binder_objstat_strings[] = {
5725 "proc",
5726 "thread",
5727 "node",
5728 "ref",
5729 "death",
5730 "transaction",
5731 "transaction_complete"
5732 };
5734 static void print_binder_stats(struct seq_file *m, const char *prefix,
5735 struct binder_stats *stats)
5736 {
5737 int i;
5739 BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5740 ARRAY_SIZE(binder_command_strings));
5741 for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5742 int temp = atomic_read(&stats->bc[i]);
5744 if (temp)
5745 seq_printf(m, "%s%s: %d\n", prefix,
5746 binder_command_strings[i], temp);
5747 }
5749 BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5750 ARRAY_SIZE(binder_return_strings));
5751 for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5752 int temp = atomic_read(&stats->br[i]);
5754 if (temp)
5755 seq_printf(m, "%s%s: %d\n", prefix,
5756 binder_return_strings[i], temp);
5757 }
5759 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5760 ARRAY_SIZE(binder_objstat_strings));
5761 BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5762 ARRAY_SIZE(stats->obj_deleted));
5763 for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5764 int created = atomic_read(&stats->obj_created[i]);
5765 int deleted = atomic_read(&stats->obj_deleted[i]);
5767 if (created || deleted)
5768 seq_printf(m, "%s%s: active %d total %d\n",
5769 prefix,
5770 binder_objstat_strings[i],
5771 created - deleted,
5772 created);
5773 }
5774 }
5776 static void print_binder_proc_stats(struct seq_file *m,
5777 struct binder_proc *proc)
5778 {
5779 struct binder_work *w;
5780 struct binder_thread *thread;
5781 struct rb_node *n;
5782 int count, strong, weak, ready_threads;
5783 size_t free_async_space =
5784 binder_alloc_get_free_async_space(&proc->alloc);
5786 seq_printf(m, "proc %d\n", proc->pid);
5787 seq_printf(m, "context %s\n", proc->context->name);
5790 binder_inner_proc_lock(proc);
5791 for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5792 count++;
5794 list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5795 ready_threads++;
5797 seq_printf(m, " threads: %d\n", count);
5798 seq_printf(m, " requested threads: %d+%d/%d\n"
5799 " ready threads %d\n"
5800 " free async space %zd\n", proc->requested_threads,
5801 proc->requested_threads_started, proc->max_threads,
5802 ready_threads,
5803 free_async_space);
5804 count = 0;
5805 for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5806 count++;
5807 binder_inner_proc_unlock(proc);
5808 seq_printf(m, " nodes: %d\n", count);
5812 binder_proc_lock(proc);
5813 for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5814 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5815 rb_node_desc);
5816 count++;
5817 strong += ref->data.strong;
5818 weak += ref->data.weak;
5819 }
5820 binder_proc_unlock(proc);
5821 seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak);
5823 count = binder_alloc_get_allocated_count(&proc->alloc);
5824 seq_printf(m, " buffers: %d\n", count);
5826 binder_alloc_print_pages(m, &proc->alloc);
5828 count = 0;
5829 binder_inner_proc_lock(proc);
5830 list_for_each_entry(w, &proc->todo, entry) {
5831 if (w->type == BINDER_WORK_TRANSACTION)
5832 count++;
5833 }
5834 binder_inner_proc_unlock(proc);
5835 seq_printf(m, " pending transactions: %d\n", count);
5837 print_binder_stats(m, " ", &proc->stats);
5840 static int state_show(struct seq_file *m, void *unused)
5841 {
5842 struct binder_proc *proc;
5843 struct binder_node *node;
5844 struct binder_node *last_node = NULL;
5846 seq_puts(m, "binder state:\n");
5848 spin_lock(&binder_dead_nodes_lock);
5849 if (!hlist_empty(&binder_dead_nodes))
5850 seq_puts(m, "dead nodes:\n");
5851 hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5852 /*
5853 * take a temporary reference on the node so it
5854 * survives and isn't removed from the list
5855 * while we print it.
5856 */
5857 node->tmp_refs++;
5858 spin_unlock(&binder_dead_nodes_lock);
5859 if (last_node)
5860 binder_put_node(last_node);
5861 binder_node_lock(node);
5862 print_binder_node_nilocked(m, node);
5863 binder_node_unlock(node);
5864 last_node = node;
5865 spin_lock(&binder_dead_nodes_lock);
5866 }
5867 spin_unlock(&binder_dead_nodes_lock);
5868 if (last_node)
5869 binder_put_node(last_node);
5871 mutex_lock(&binder_procs_lock);
5872 hlist_for_each_entry(proc, &binder_procs, proc_node)
5873 print_binder_proc(m, proc, 1);
5874 mutex_unlock(&binder_procs_lock);
5876 return 0;
5877 }
5879 static int stats_show(struct seq_file *m, void *unused)
5880 {
5881 struct binder_proc *proc;
5883 seq_puts(m, "binder stats:\n");
5885 print_binder_stats(m, "", &binder_stats);
5887 mutex_lock(&binder_procs_lock);
5888 hlist_for_each_entry(proc, &binder_procs, proc_node)
5889 print_binder_proc_stats(m, proc);
5890 mutex_unlock(&binder_procs_lock);
5892 return 0;
5893 }
5895 static int transactions_show(struct seq_file *m, void *unused)
5896 {
5897 struct binder_proc *proc;
5899 seq_puts(m, "binder transactions:\n");
5900 mutex_lock(&binder_procs_lock);
5901 hlist_for_each_entry(proc, &binder_procs, proc_node)
5902 print_binder_proc(m, proc, 0);
5903 mutex_unlock(&binder_procs_lock);
5905 return 0;
5906 }
5908 static int proc_show(struct seq_file *m, void *unused)
5909 {
5910 struct binder_proc *itr;
5911 int pid = (unsigned long)m->private;
5913 mutex_lock(&binder_procs_lock);
5914 hlist_for_each_entry(itr, &binder_procs, proc_node) {
5915 if (itr->pid == pid) {
5916 seq_puts(m, "binder proc state:\n");
5917 print_binder_proc(m, itr, 1);
5918 }
5919 }
5920 mutex_unlock(&binder_procs_lock);
5922 return 0;
5923 }
5925 static void print_binder_transaction_log_entry(struct seq_file *m,
5926 struct binder_transaction_log_entry *e)
5927 {
5928 int debug_id = READ_ONCE(e->debug_id_done);
5929 /*
5930 * read barrier to guarantee debug_id_done read before
5931 * we print the log values
5932 */
5933 smp_rmb();
5934 seq_printf(m,
5935 "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5936 e->debug_id, (e->call_type == 2) ? "reply" :
5937 ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5938 e->from_thread, e->to_proc, e->to_thread, e->context_name,
5939 e->to_node, e->target_handle, e->data_size, e->offsets_size,
5940 e->return_error, e->return_error_param,
5941 e->return_error_line);
5942 /*
5943 * read-barrier to guarantee read of debug_id_done after
5944 * done printing the fields of the entry
5945 */
5946 smp_rmb();
5947 seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5948 "\n" : " (incomplete)\n");
5951 static int transaction_log_show(struct seq_file *m, void *unused)
5952 {
5953 struct binder_transaction_log *log = m->private;
5954 unsigned int log_cur = atomic_read(&log->cur);
5955 unsigned int count;
5956 unsigned int cur;
5957 int i;
5959 count = log_cur + 1;
5960 cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5961 0 : count % ARRAY_SIZE(log->entry);
5962 if (count > ARRAY_SIZE(log->entry) || log->full)
5963 count = ARRAY_SIZE(log->entry);
5964 for (i = 0; i < count; i++) {
5965 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5967 print_binder_transaction_log_entry(m, &log->entry[index]);
5968 }
5969 return 0;
5970 }
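/*
 * The log is a fixed-size ring where "cur" only increments, so once it
 * has wrapped the oldest entry sits just after the newest one. For
 * example, with 32 slots and log_cur == 40, count is clamped to 32 and
 * printing starts at index 41 % 32 == 9, walking the full ring in age
 * order, which is what the modular arithmetic above implements.
 */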
5972 const struct file_operations binder_fops = {
5973 .owner = THIS_MODULE,
5974 .poll = binder_poll,
5975 .unlocked_ioctl = binder_ioctl,
5976 .compat_ioctl = compat_ptr_ioctl,
5977 .mmap = binder_mmap,
5978 .open = binder_open,
5979 .flush = binder_flush,
5980 .release = binder_release,
5981 };
5983 DEFINE_SHOW_ATTRIBUTE(state);
5984 DEFINE_SHOW_ATTRIBUTE(stats);
5985 DEFINE_SHOW_ATTRIBUTE(transactions);
5986 DEFINE_SHOW_ATTRIBUTE(transaction_log);
5988 const struct binder_debugfs_entry binder_debugfs_entries[] = {
5989 {
5990 .name = "state",
5991 .mode = 0444,
5992 .fops = &state_fops,
5993 .data = NULL,
5994 },
5995 {
5996 .name = "stats",
5997 .mode = 0444,
5998 .fops = &stats_fops,
5999 .data = NULL,
6000 },
6001 {
6002 .name = "transactions",
6003 .mode = 0444,
6004 .fops = &transactions_fops,
6005 .data = NULL,
6006 },
6007 {
6008 .name = "transaction_log",
6009 .mode = 0444,
6010 .fops = &transaction_log_fops,
6011 .data = &binder_transaction_log,
6012 },
6013 {
6014 .name = "failed_transaction_log",
6015 .mode = 0444,
6016 .fops = &transaction_log_fops,
6017 .data = &binder_transaction_log_failed,
6018 },
6019 {} /* terminator */
6020 };
6022 static int __init init_binder_device(const char *name)
6023 {
6024 int ret;
6025 struct binder_device *binder_device;
6027 binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6028 if (!binder_device)
6029 return -ENOMEM;
6031 binder_device->miscdev.fops = &binder_fops;
6032 binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6033 binder_device->miscdev.name = name;
6035 refcount_set(&binder_device->ref, 1);
6036 binder_device->context.binder_context_mgr_uid = INVALID_UID;
6037 binder_device->context.name = name;
6038 mutex_init(&binder_device->context.context_mgr_node_lock);
6040 ret = misc_register(&binder_device->miscdev);
6041 if (ret < 0) {
6042 kfree(binder_device);
6043 return ret;
6044 }
6046 hlist_add_head(&binder_device->hlist, &binder_devices);
6048 return ret;
6049 }
6051 static int __init binder_init(void)
6052 {
6053 int ret;
6054 char *device_name, *device_tmp;
6055 struct binder_device *device;
6056 struct hlist_node *tmp;
6057 char *device_names = NULL;
6059 ret = binder_alloc_shrinker_init();
6060 if (ret)
6061 return ret;
6063 atomic_set(&binder_transaction_log.cur, ~0U);
6064 atomic_set(&binder_transaction_log_failed.cur, ~0U);
6066 binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6067 if (binder_debugfs_dir_entry_root) {
6068 const struct binder_debugfs_entry *db_entry;
6070 binder_for_each_debugfs_entry(db_entry)
6071 debugfs_create_file(db_entry->name,
6072 db_entry->mode,
6073 binder_debugfs_dir_entry_root,
6074 db_entry->data,
6075 db_entry->fops);
6077 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6078 binder_debugfs_dir_entry_root);
6079 }
6081 if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6082 strcmp(binder_devices_param, "") != 0) {
6084 * Copy the module_parameter string, because we don't want to
6085 * tokenize it in-place.
6087 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6088 if (!device_names) {
6089 ret = -ENOMEM;
6090 goto err_alloc_device_names_failed;
6091 }
6093 device_tmp = device_names;
6094 while ((device_name = strsep(&device_tmp, ","))) {
6095 ret = init_binder_device(device_name);
6096 if (ret)
6097 goto err_init_binder_device_failed;
6098 }
6099 }
6101 ret = init_binderfs();
6102 if (ret)
6103 goto err_init_binder_device_failed;
6105 return ret;
6107 err_init_binder_device_failed:
6108 hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6109 misc_deregister(&device->miscdev);
6110 hlist_del(&device->hlist);
6111 kfree(device);
6112 }
6114 kfree(device_names);
6116 err_alloc_device_names_failed:
6117 debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6119 return ret;
6120 }
6122 device_initcall(binder_init);
6124 #define CREATE_TRACE_POINTS
6125 #include "binder_trace.h"
6127 MODULE_LICENSE("GPL v2");