/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel.
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel.
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 */

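/*
 * Example (an illustrative sketch, not a call site in this driver): a
 * caller that needs both ref bookkeeping and a proc's work lists takes
 * the locks in the documented order and releases them in reverse:
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock nests inside
 *	...
 *	binder_inner_proc_unlock(proc);
 *	binder_proc_unlock(proc);
 */
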
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <asm/cacheflush.h>
#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>

#ifdef CONFIG_ANDROID_BINDER_IPC_32BIT
#define BINDER_IPC_32BIT 1
#endif

#include <uapi/linux/android/binder.h>
#include "binder_alloc.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

#define BINDER_DEBUG_ENTRY(name) \
static int binder_##name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, binder_##name##_show, inode->i_private); \
} \
\
static const struct file_operations binder_##name##_fops = { \
	.owner = THIS_MODULE, \
	.open = binder_##name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
}

static int binder_proc_show(struct seq_file *m, void *unused);
BINDER_DEBUG_ENTRY(proc);

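/*
 * Illustrative sketch (assumed usage): BINDER_DEBUG_ENTRY(proc) above
 * expands to binder_proc_open() plus a binder_proc_fops table, which
 * can then be registered with debugfs, e.g.:
 *
 *	debugfs_create_file("example", 0444, binder_debugfs_dir_entry_proc,
 *			    data, &binder_proc_fops);
 *
 * where "data" is a caller-supplied pointer that single_open() passes
 * to binder_proc_show() via inode->i_private.
 */
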
/* This is only defined in include/asm-arm/sizes.h */
#ifndef SZ_4M
#define SZ_4M 0x400000
#endif

#define FORBIDDEN_MMAP_FLAGS (VM_WRITE)

enum {
	BINDER_DEBUG_USER_ERROR = 1U << 0,
	BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1,
	BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2,
	BINDER_DEBUG_OPEN_CLOSE = 1U << 3,
	BINDER_DEBUG_DEAD_BINDER = 1U << 4,
	BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5,
	BINDER_DEBUG_READ_WRITE = 1U << 6,
	BINDER_DEBUG_USER_REFS = 1U << 7,
	BINDER_DEBUG_THREADS = 1U << 8,
	BINDER_DEBUG_TRANSACTION = 1U << 9,
	BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10,
	BINDER_DEBUG_FREE_BUFFER = 1U << 11,
	BINDER_DEBUG_INTERNAL_REFS = 1U << 12,
	BINDER_DEBUG_PRIORITY_CAP = 1U << 13,
	BINDER_DEBUG_SPINLOCKS = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
	BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);

static char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
					 struct kernel_param *kp)
{
	int ret;

	ret = param_set_int(val, kp);
	if (binder_stop_on_user_error < 2)
		wake_up(&binder_user_error_wait);

	return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
		  param_get_int, &binder_stop_on_user_error, S_IWUSR | S_IRUGO);

#define binder_debug(mask, x...) \
	do { \
		if (binder_debug_mask & mask) \
			pr_info(x); \
	} while (0)

#define binder_user_error(x...) \
	do { \
		if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
			pr_info(x); \
		if (binder_stop_on_user_error) \
			binder_stop_on_user_error = 2; \
	} while (0)

#define to_flat_binder_object(hdr) \
	container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
	container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
	container_of(hdr, struct binder_fd_array_object, hdr)

enum binder_stat_types {
	BINDER_STAT_PROC,
	BINDER_STAT_THREAD,
	BINDER_STAT_NODE,
	BINDER_STAT_REF,
	BINDER_STAT_DEATH,
	BINDER_STAT_TRANSACTION,
	BINDER_STAT_TRANSACTION_COMPLETE,
	BINDER_STAT_COUNT
};

struct binder_stats {
	atomic_t br[_IOC_NR(BR_FAILED_REPLY) + 1];
	atomic_t bc[_IOC_NR(BC_REPLY_SG) + 1];
	atomic_t obj_created[BINDER_STAT_COUNT];
	atomic_t obj_deleted[BINDER_STAT_COUNT];
};

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
	atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
	int debug_id;
	int debug_id_done;
	int call_type;
	int from_proc;
	int from_thread;
	int target_handle;
	int to_proc;
	int to_thread;
	int to_node;
	int data_size;
	int offsets_size;
	int return_error_line;
	uint32_t return_error;
	uint32_t return_error_param;
	const char *context_name;
};

struct binder_transaction_log {
	atomic_t cur;
	bool full;
	struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
	struct binder_transaction_log *log)
{
	struct binder_transaction_log_entry *e;
	unsigned int cur = atomic_inc_return(&log->cur);

	if (cur >= ARRAY_SIZE(log->entry))
		log->full = true;
	e = &log->entry[cur % ARRAY_SIZE(log->entry)];
	WRITE_ONCE(e->debug_id_done, 0);
	/*
	 * write-barrier to synchronize access to e->debug_id_done.
	 * We make sure the initialized 0 value is seen before
	 * the other fields are zeroed by memset().
	 */
	smp_wmb();
	memset(e, 0, sizeof(*e));
	return e;
}

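/*
 * Illustrative sketch (assumed reader, not code from this file): the
 * smp_wmb() above pairs with read barriers on the consumer side, which
 * samples debug_id_done around its copy to detect a racing writer:
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	copy = *e;	// may race with binder_transaction_log_add()
 *	smp_rmb();
 *	if (done && done == READ_ONCE(e->debug_id_done))
 *		;	// copy is consistent and safe to print
 */
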
struct binder_context {
	struct binder_node *binder_context_mgr_node;
	struct mutex context_mgr_node_lock;

	kuid_t binder_context_mgr_uid;
	const char *name;
};

struct binder_device {
	struct hlist_node hlist;
	struct miscdevice miscdev;
	struct binder_context context;
};

/**
 * struct binder_work - work enqueued on a worklist
 * @entry:             node enqueued on list
 * @type:              type of work to be performed
 *
 * There are separate work lists for proc, thread, and node (async).
 */
struct binder_work {
	struct list_head entry;

	enum {
		BINDER_WORK_TRANSACTION = 1,
		BINDER_WORK_TRANSACTION_COMPLETE,
		BINDER_WORK_RETURN_ERROR,
		BINDER_WORK_NODE,
		BINDER_WORK_DEAD_BINDER,
		BINDER_WORK_DEAD_BINDER_AND_CLEAR,
		BINDER_WORK_CLEAR_DEATH_NOTIFICATION,
	} type;
};

struct binder_error {
	struct binder_work work;
	uint32_t cmd;
};

/**
 * struct binder_node - binder node bookkeeping
 * @debug_id:             unique ID for debugging
 *                        (invariant after initialized)
 * @lock:                 lock for node fields
 * @work:                 worklist element for node work
 *                        (protected by @proc->inner_lock)
 * @rb_node:              element for proc->nodes tree
 *                        (protected by @proc->inner_lock)
 * @dead_node:            element for binder_dead_nodes list
 *                        (protected by binder_dead_nodes_lock)
 * @proc:                 binder_proc that owns this node
 *                        (invariant after initialized)
 * @refs:                 list of references on this node
 *                        (protected by @lock)
 * @internal_strong_refs: used to take strong references when
 *                        initiating a transaction
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @local_weak_refs:      weak user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @local_strong_refs:    strong user refs from local process
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @tmp_refs:             temporary kernel refs
 *                        (protected by @proc->inner_lock while @proc
 *                        is valid, and by binder_dead_nodes_lock
 *                        if @proc is NULL. During inc/dec and node release
 *                        it is also protected by @lock to provide safety
 *                        as the node dies and @proc becomes NULL)
 * @ptr:                  userspace pointer for node
 *                        (invariant, no lock needed)
 * @cookie:               userspace cookie for node
 *                        (invariant, no lock needed)
 * @has_strong_ref:       userspace notified of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @pending_strong_ref:   userspace has acked notification of strong ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @has_weak_ref:         userspace notified of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @pending_weak_ref:     userspace has acked notification of weak ref
 *                        (protected by @proc->inner_lock if @proc
 *                        is valid)
 * @has_async_transaction: async transaction to node in progress
 *                        (protected by @lock)
 * @accept_fds:           file descriptor operations supported for node
 *                        (invariant after initialized)
 * @min_priority:         minimum scheduling priority
 *                        (invariant after initialized)
 * @async_todo:           list of async work items
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder nodes.
 */
struct binder_node {
	int debug_id;
	spinlock_t lock;
	struct binder_work work;
	union {
		struct rb_node rb_node;
		struct hlist_node dead_node;
	};
	struct binder_proc *proc;
	struct hlist_head refs;
	int internal_strong_refs;
	int local_weak_refs;
	int local_strong_refs;
	int tmp_refs;
	binder_uintptr_t ptr;
	binder_uintptr_t cookie;
	struct {
		/*
		 * bitfield elements protected by
		 * proc inner_lock
		 */
		u8 has_strong_ref:1;
		u8 pending_strong_ref:1;
		u8 has_weak_ref:1;
		u8 pending_weak_ref:1;
	};
	struct {
		/*
		 * invariant after initialization
		 */
		u8 accept_fds:1;
		u8 min_priority;
	};
	bool has_async_transaction;
	struct list_head async_todo;
};

struct binder_ref_death {
	/**
	 * @work: worklist element for death notifications
	 *        (protected by inner_lock of the proc that
	 *        this ref belongs to)
	 */
	struct binder_work work;
	binder_uintptr_t cookie;
};

/**
 * struct binder_ref_data - binder_ref counts and id
 * @debug_id:        unique ID for the ref
 * @desc:            unique userspace handle for ref
 * @strong:          strong ref count (debugging only if not locked)
 * @weak:            weak ref count (debugging only if not locked)
 *
 * Structure to hold ref count and ref id information. Since
 * the actual ref can only be accessed with a lock, this structure
 * is used to return information about the ref to callers of
 * ref inc/dec functions.
 */
struct binder_ref_data {
	int debug_id;
	uint32_t desc;
	int strong;
	int weak;
};

/**
 * struct binder_ref - struct to track references on nodes
 * @data:        binder_ref_data containing id, handle, and current refcounts
 * @rb_node_desc: node for lookup by @data.desc in proc's rb_tree
 * @rb_node_node: node for lookup by @node in proc's rb_tree
 * @node_entry:  list entry for node->refs list in target node
 *               (protected by @node->lock)
 * @proc:        binder_proc containing ref
 * @node:        binder_node of target node. When cleaning up a
 *               ref for deletion in binder_cleanup_ref, a non-NULL
 *               @node indicates the node must be freed
 * @death:       pointer to death notification (ref_death) if requested
 *               (protected by @node->lock)
 *
 * Structure to track references from procA to target node (on procB). This
 * structure is unsafe to access without holding @proc->outer_lock.
 */
struct binder_ref {
	/* Lookups needed: */
	/*   node + proc => ref (transaction) */
	/*   desc + proc => ref (transaction, inc/dec ref) */
	/*   node => refs + procs (proc exit) */
	struct binder_ref_data data;
	struct rb_node rb_node_desc;
	struct rb_node rb_node_node;
	struct hlist_node node_entry;
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_ref_death *death;
};

enum binder_deferred_state {
	BINDER_DEFERRED_PUT_FILES    = 0x01,
	BINDER_DEFERRED_FLUSH        = 0x02,
	BINDER_DEFERRED_RELEASE      = 0x04,
};

/**
 * struct binder_proc - binder process bookkeeping
 * @proc_node:            element for binder_procs list
 * @threads:              rbtree of binder_threads in this proc
 *                        (protected by @inner_lock)
 * @nodes:                rbtree of binder nodes associated with
 *                        this proc ordered by node->ptr
 *                        (protected by @inner_lock)
 * @refs_by_desc:         rbtree of refs ordered by ref->desc
 *                        (protected by @outer_lock)
 * @refs_by_node:         rbtree of refs ordered by ref->node
 *                        (protected by @outer_lock)
 * @waiting_threads:      threads currently waiting for proc work
 *                        (protected by @inner_lock)
 * @pid:                  PID of group_leader of process
 *                        (invariant after initialized)
 * @tsk:                  task_struct for group_leader of process
 *                        (invariant after initialized)
 * @files:                files_struct for process
 *                        (protected by @files_lock)
 * @files_lock:           mutex to protect @files
 * @deferred_work_node:   element for binder_deferred_list
 *                        (protected by binder_deferred_lock)
 * @deferred_work:        bitmap of deferred work to perform
 *                        (protected by binder_deferred_lock)
 * @is_dead:              process is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @inner_lock)
 * @todo:                 list of work for this process
 *                        (protected by @inner_lock)
 * @wait:                 wait queue head to wait for proc work
 *                        (invariant after initialized)
 * @stats:                per-process binder statistics
 *                        (atomics, no lock needed)
 * @delivered_death:      list of delivered death notifications
 *                        (protected by @inner_lock)
 * @max_threads:          cap on number of binder threads
 *                        (protected by @inner_lock)
 * @requested_threads:    number of binder threads requested but not
 *                        yet started. In current implementation, can
 *                        only be 0 or 1.
 *                        (protected by @inner_lock)
 * @requested_threads_started: number of binder threads started
 *                        (protected by @inner_lock)
 * @tmp_ref:              temporary reference to indicate proc is in use
 *                        (protected by @inner_lock)
 * @default_priority:     default scheduler priority
 *                        (invariant after initialized)
 * @debugfs_entry:        debugfs node
 * @alloc:                binder allocator bookkeeping
 * @context:              binder_context for this proc
 *                        (invariant after initialized)
 * @inner_lock:           can nest under outer_lock and/or node lock
 * @outer_lock:           no nesting under inner or node lock
 *                        Lock order: 1) outer, 2) node, 3) inner
 *
 * Bookkeeping structure for binder processes
 */
struct binder_proc {
	struct hlist_node proc_node;
	struct rb_root threads;
	struct rb_root nodes;
	struct rb_root refs_by_desc;
	struct rb_root refs_by_node;
	struct list_head waiting_threads;
	int pid;
	struct task_struct *tsk;
	struct files_struct *files;
	struct mutex files_lock;
	struct hlist_node deferred_work_node;
	int deferred_work;
	bool is_dead;

	struct list_head todo;
	wait_queue_head_t wait;
	struct binder_stats stats;
	struct list_head delivered_death;
	int max_threads;
	int requested_threads;
	int requested_threads_started;
	int tmp_ref;
	long default_priority;
	struct dentry *debugfs_entry;
	struct binder_alloc alloc;
	struct binder_context *context;
	spinlock_t inner_lock;
	spinlock_t outer_lock;
};

enum {
	BINDER_LOOPER_STATE_REGISTERED  = 0x01,
	BINDER_LOOPER_STATE_ENTERED     = 0x02,
	BINDER_LOOPER_STATE_EXITED      = 0x04,
	BINDER_LOOPER_STATE_INVALID     = 0x08,
	BINDER_LOOPER_STATE_WAITING     = 0x10,
	BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * struct binder_thread - binder thread bookkeeping
 * @proc:                 binder process for this thread
 *                        (invariant after initialization)
 * @rb_node:              element for proc->threads rbtree
 *                        (protected by @proc->inner_lock)
 * @waiting_thread_node:  element for @proc->waiting_threads list
 *                        (protected by @proc->inner_lock)
 * @pid:                  PID for this thread
 *                        (invariant after initialization)
 * @looper:               bitmap of looping state
 *                        (only accessed by this thread)
 * @looper_need_return:   looping thread needs to exit driver
 *                        (no lock needed)
 * @transaction_stack:    stack of in-progress transactions for this thread
 *                        (protected by @proc->inner_lock)
 * @todo:                 list of work to do for this thread
 *                        (protected by @proc->inner_lock)
 * @return_error:         transaction errors reported by this thread
 *                        (only accessed by this thread)
 * @reply_error:          transaction errors reported by target thread
 *                        (protected by @proc->inner_lock)
 * @wait:                 wait queue for thread work
 * @stats:                per-thread statistics
 *                        (atomics, no lock needed)
 * @tmp_ref:              temporary reference to indicate thread is in use
 *                        (atomic since @proc->inner_lock cannot
 *                        always be acquired)
 * @is_dead:              thread is dead and awaiting free
 *                        when outstanding transactions are cleaned up
 *                        (protected by @proc->inner_lock)
 *
 * Bookkeeping structure for binder threads.
 */
struct binder_thread {
	struct binder_proc *proc;
	struct rb_node rb_node;
	struct list_head waiting_thread_node;
	int pid;
	int looper;              /* only modified by this thread */
	bool looper_need_return; /* can be written by other thread */
	struct binder_transaction *transaction_stack;
	struct list_head todo;
	struct binder_error return_error;
	struct binder_error reply_error;
	wait_queue_head_t wait;
	struct binder_stats stats;
	atomic_t tmp_ref;
	bool is_dead;
};

struct binder_transaction {
	int debug_id;
	struct binder_work work;
	struct binder_thread *from;
	struct binder_transaction *from_parent;
	struct binder_proc *to_proc;
	struct binder_thread *to_thread;
	struct binder_transaction *to_parent;
	unsigned need_reply:1;
	/* unsigned is_dead:1; */	/* not used at the moment */

	struct binder_buffer *buffer;
	unsigned int	code;
	unsigned int	flags;
	long	priority;
	long	saved_priority;
	kuid_t	sender_euid;
	/**
	 * @lock:  protects @from, @to_proc, and @to_thread
	 *
	 * @from, @to_proc, and @to_thread can be set to NULL
	 * during thread teardown
	 */
	spinlock_t lock;
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release outer lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:	struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:	struct binder_proc to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:	struct binder_node to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:	struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
{
	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	spin_lock(&node->lock);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:	struct binder_node to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
{
	struct binder_proc *proc = node->proc;

	binder_debug(BINDER_DEBUG_SPINLOCKS,
		     "%s: line=%d\n", __func__, line);
	if (proc)
		binder_inner_proc_unlock(proc);
	spin_unlock(&node->lock);
}

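/*
 * Illustrative sketch (assumed usage): binder_node_inner_lock() lets a
 * caller update node fields without caring whether the node is still
 * attached to a live proc, since the proc inner lock is only taken
 * when node->proc is non-NULL:
 *
 *	binder_node_inner_lock(node);
 *	node->has_async_transaction = true;	// @lock-protected field
 *	binder_node_inner_unlock(node);
 */
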
static bool binder_worklist_empty_ilocked(struct list_head *list)
{
	return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:	binder_proc associated with list
 * @list:	list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
				  struct list_head *list)
{
	bool ret;

	binder_inner_proc_lock(proc);
	ret = binder_worklist_empty_ilocked(list);
	binder_inner_proc_unlock(proc);
	return ret;
}

static void
binder_enqueue_work_ilocked(struct binder_work *work,
			    struct list_head *target_list)
{
	BUG_ON(target_list == NULL);
	BUG_ON(work->entry.next && !list_empty(&work->entry));
	list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_work() - Add an item to the work list
 * @proc:	  binder_proc associated with list
 * @work:	  struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 */
static void
binder_enqueue_work(struct binder_proc *proc,
		    struct binder_work *work,
		    struct list_head *target_list)
{
	binder_inner_proc_lock(proc);
	binder_enqueue_work_ilocked(work, target_list);
	binder_inner_proc_unlock(proc);
}

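/*
 * Illustrative sketch (assumed caller): work is usually queued and the
 * target woken in a single inner-lock critical section:
 *
 *	binder_inner_proc_lock(proc);
 *	binder_enqueue_work_ilocked(&node->work, &proc->todo);
 *	binder_wakeup_proc_ilocked(proc);	// defined further below
 *	binder_inner_proc_unlock(proc);
 */
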
static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
	list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:	binder_proc associated with list
 * @work:	struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(work);
	binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
					struct list_head *list)
{
	struct binder_work *w;

	w = list_first_entry_or_null(list, struct binder_work, entry);
	if (w)
		list_del_init(&w->entry);
	return w;
}

/**
 * binder_dequeue_work_head() - Dequeues the item at head of list
 * @proc:	binder_proc associated with list
 * @list:	list to dequeue head
 *
 * Removes the head of the list if there are items on the list
 *
 * Return: pointer to dequeued binder_work, NULL if list was empty
 */
static struct binder_work *binder_dequeue_work_head(
					struct binder_proc *proc,
					struct list_head *list)
{
	struct binder_work *w;

	binder_inner_proc_lock(proc);
	w = binder_dequeue_work_head_ilocked(list);
	binder_inner_proc_unlock(proc);
	return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
{
	unsigned long rlim_cur;
	unsigned long irqs;
	int ret;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		ret = -ESRCH;
		goto err;
	}
	if (!lock_task_sighand(proc->tsk, &irqs)) {
		ret = -EMFILE;
		goto err;
	}
	rlim_cur = task_rlimit(proc->tsk, RLIMIT_NOFILE);
	unlock_task_sighand(proc->tsk, &irqs);

	ret = __alloc_fd(proc->files, 0, rlim_cur, flags);
err:
	mutex_unlock(&proc->files_lock);
	return ret;
}

/*
 * copied from fd_install
 */
static void task_fd_install(
	struct binder_proc *proc, unsigned int fd, struct file *file)
{
	mutex_lock(&proc->files_lock);
	if (proc->files)
		__fd_install(proc->files, fd, file);
	mutex_unlock(&proc->files_lock);
}

/*
 * copied from sys_close
 */
static long task_close_fd(struct binder_proc *proc, unsigned int fd)
{
	int retval;

	mutex_lock(&proc->files_lock);
	if (proc->files == NULL) {
		retval = -ESRCH;
		goto err;
	}
	retval = __close_fd(proc->files, fd);
	/* can't restart close syscall because file table entry was cleared */
	if (unlikely(retval == -ERESTARTSYS ||
		     retval == -ERESTARTNOINTR ||
		     retval == -ERESTARTNOHAND ||
		     retval == -ERESTART_RESTARTBLOCK))
		retval = -EINTR;
err:
	mutex_unlock(&proc->files_lock);
	return retval;
}

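/*
 * Illustrative sketch (assumed flow, condensed from how fd translation
 * uses these helpers): reserve a descriptor in the target process,
 * install the file, and close the slot again only on a later failure:
 *
 *	int fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 *
 *	if (fd < 0)
 *		return fd;
 *	task_fd_install(target_proc, fd, file);
 *	...
 *	task_close_fd(target_proc, fd);		// error path only
 */
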
static bool binder_has_work_ilocked(struct binder_thread *thread,
				    bool do_proc_work)
{
	return !binder_worklist_empty_ilocked(&thread->todo) ||
		thread->looper_need_return ||
		(do_proc_work &&
		 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
	bool has_work;

	binder_inner_proc_lock(thread->proc);
	has_work = binder_has_work_ilocked(thread, do_proc_work);
	binder_inner_proc_unlock(thread->proc);

	return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
	return !thread->transaction_stack &&
		binder_worklist_empty_ilocked(&thread->todo) &&
		(thread->looper & (BINDER_LOOPER_STATE_ENTERED |
				   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
					       bool sync)
{
	struct rb_node *n;
	struct binder_thread *thread;

	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		thread = rb_entry(n, struct binder_thread, rb_node);
		if (thread->looper & BINDER_LOOPER_STATE_POLL &&
		    binder_available_for_proc_work_ilocked(thread)) {
			if (sync)
				wake_up_interruptible_sync(&thread->wait);
			else
				wake_up_interruptible(&thread->wait);
		}
	}
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:	process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:	If there's a thread currently waiting for process work,
 *		returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread;

	assert_spin_locked(&proc->inner_lock);
	thread = list_first_entry_or_null(&proc->waiting_threads,
					  struct binder_thread,
					  waiting_thread_node);

	if (thread)
		list_del_init(&thread->waiting_thread_node);

	return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:	process to wake up a thread in
 * @thread:	specific thread to wake-up (may be NULL)
 * @sync:	whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread_ilocked() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
					 struct binder_thread *thread,
					 bool sync)
{
	assert_spin_locked(&proc->inner_lock);

	if (thread) {
		if (sync)
			wake_up_interruptible_sync(&thread->wait);
		else
			wake_up_interruptible(&thread->wait);
		return;
	}

	/* Didn't find a thread waiting for proc work; this can happen
	 * in two scenarios:
	 * 1. All threads are busy handling transactions
	 *    In that case, one of those threads should call back into
	 *    the kernel driver soon and pick up this work.
	 * 2. Threads are using the (e)poll interface, in which case
	 *    they may be blocked on the waitqueue without having been
	 *    added to waiting_threads. For this case, we just iterate
	 *    over all threads not handling transaction work, and
	 *    wake them all up. We wake all because we don't know whether
	 *    a thread that called into (e)poll is handling non-binder
	 *    work currently.
	 */
	binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
	struct binder_thread *thread = binder_select_thread_ilocked(proc);

	binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

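/*
 * Illustrative sketch (assumed usage): when the caller must know which
 * thread will handle the work before queueing it, the select/wakeup
 * pair is split, all under the inner lock:
 *
 *	binder_inner_proc_lock(proc);
 *	thread = binder_select_thread_ilocked(proc);
 *	binder_enqueue_work_ilocked(work,
 *				    thread ? &thread->todo : &proc->todo);
 *	binder_wakeup_thread_ilocked(proc, thread, true);	// sync
 *	binder_inner_proc_unlock(proc);
 */
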
static void binder_set_nice(long nice)
{
	long min_nice;

	if (can_nice(current, nice)) {
		set_user_nice(current, nice);
		return;
	}
	min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
	binder_debug(BINDER_DEBUG_PRIORITY_CAP,
		     "%d: nice value %ld not allowed, using %ld instead\n",
		      current->pid, nice, min_nice);
	set_user_nice(current, min_nice);
	if (min_nice <= MAX_NICE)
		return;
	binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
						   binder_uintptr_t ptr)
{
	struct rb_node *n = proc->nodes.rb_node;
	struct binder_node *node;

	assert_spin_locked(&proc->inner_lock);

	while (n) {
		node = rb_entry(n, struct binder_node, rb_node);

		if (ptr < node->ptr)
			n = n->rb_left;
		else if (ptr > node->ptr)
			n = n->rb_right;
		else {
			/*
			 * take an implicit weak reference
			 * to ensure node stays alive until
			 * call to binder_put_node()
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
					   binder_uintptr_t ptr)
{
	struct binder_node *node;

	binder_inner_proc_lock(proc);
	node = binder_get_node_ilocked(proc, ptr);
	binder_inner_proc_unlock(proc);
	return node;
}

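/*
 * Illustrative sketch (assumed caller): binder_get_node() returns the
 * node with an implicit tmp_ref held, so every successful lookup must
 * be paired with binder_put_node():
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		...
 *		binder_put_node(node);
 *	}
 */
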
static struct binder_node *binder_init_node_ilocked(
						struct binder_proc *proc,
						struct binder_node *new_node,
						struct flat_binder_object *fp)
{
	struct rb_node **p = &proc->nodes.rb_node;
	struct rb_node *parent = NULL;
	struct binder_node *node;
	binder_uintptr_t ptr = fp ? fp->binder : 0;
	binder_uintptr_t cookie = fp ? fp->cookie : 0;
	__u32 flags = fp ? fp->flags : 0;

	assert_spin_locked(&proc->inner_lock);

	while (*p) {
		parent = *p;
		node = rb_entry(parent, struct binder_node, rb_node);

		if (ptr < node->ptr)
			p = &(*p)->rb_left;
		else if (ptr > node->ptr)
			p = &(*p)->rb_right;
		else {
			/*
			 * A matching node is already in
			 * the rb tree. Abandon the init
			 * and return it.
			 */
			binder_inc_node_tmpref_ilocked(node);
			return node;
		}
	}
	node = new_node;
	binder_stats_created(BINDER_STAT_NODE);
	node->tmp_refs++;
	rb_link_node(&node->rb_node, parent, p);
	rb_insert_color(&node->rb_node, &proc->nodes);
	node->debug_id = atomic_inc_return(&binder_last_id);
	node->proc = proc;
	node->ptr = ptr;
	node->cookie = cookie;
	node->work.type = BINDER_WORK_NODE;
	node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
	node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
	spin_lock_init(&node->lock);
	INIT_LIST_HEAD(&node->work.entry);
	INIT_LIST_HEAD(&node->async_todo);
	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d:%d node %d u%016llx c%016llx created\n",
		     proc->pid, current->pid, node->debug_id,
		     (u64)node->ptr, (u64)node->cookie);

	return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
					   struct flat_binder_object *fp)
{
	struct binder_node *node;
	struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

	if (!new_node)
		return NULL;
	binder_inner_proc_lock(proc);
	node = binder_init_node_ilocked(proc, new_node, fp);
	binder_inner_proc_unlock(proc);
	if (node != new_node)
		/*
		 * The node was already added by another thread
		 */
		kfree(new_node);

	return node;
}

static void binder_free_node(struct binder_node *node)
{
	kfree(node);
	binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
				    int internal,
				    struct list_head *target_list)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal) {
			if (target_list == NULL &&
			    node->internal_strong_refs == 0 &&
			    !(node->proc &&
			      node == node->proc->context->binder_context_mgr_node &&
			      node->has_strong_ref)) {
				pr_err("invalid inc strong node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			node->internal_strong_refs++;
		} else
			node->local_strong_refs++;
		if (!node->has_strong_ref && target_list) {
			binder_dequeue_work_ilocked(&node->work);
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	} else {
		if (!internal)
			node->local_weak_refs++;
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
			if (target_list == NULL) {
				pr_err("invalid inc weak node for %d\n",
				       node->debug_id);
				return -EINVAL;
			}
			binder_enqueue_work_ilocked(&node->work, target_list);
		}
	}
	return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
			   struct list_head *target_list)
{
	int ret;

	binder_node_inner_lock(node);
	ret = binder_inc_node_nilocked(node, strong, internal, target_list);
	binder_node_inner_unlock(node);

	return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
				     int strong, int internal)
{
	struct binder_proc *proc = node->proc;

	assert_spin_locked(&node->lock);
	if (proc)
		assert_spin_locked(&proc->inner_lock);
	if (strong) {
		if (internal)
			node->internal_strong_refs--;
		else
			node->local_strong_refs--;
		if (node->local_strong_refs || node->internal_strong_refs)
			return false;
	} else {
		if (!internal)
			node->local_weak_refs--;
		if (node->local_weak_refs || node->tmp_refs ||
				!hlist_empty(&node->refs))
			return false;
	}

	if (proc && (node->has_strong_ref || node->has_weak_ref)) {
		if (list_empty(&node->work.entry)) {
			binder_enqueue_work_ilocked(&node->work, &proc->todo);
			binder_wakeup_proc_ilocked(proc);
		}
	} else {
		if (hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in binder_dec_node().
	 * If that changes, a change is needed here too.
	 */
	free_node = binder_dec_node_nilocked(node, 0, 1);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
	binder_dec_node_tmpref(node);
}

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
						 u32 desc, bool need_strong_ref)
{
	struct rb_node *n = proc->refs_by_desc.rb_node;
	struct binder_ref *ref;

	while (n) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);

		if (desc < ref->data.desc) {
			n = n->rb_left;
		} else if (desc > ref->data.desc) {
			n = n->rb_right;
		} else if (need_strong_ref && !ref->data.strong) {
			binder_user_error("tried to use weak ref as strong ref\n");
			return NULL;
		} else {
			return ref;
		}
	}
	return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:	binder_proc that owns the ref
 * @node:	binder_node of target
 * @new_ref:	newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:	the ref for node. It is possible that another thread
 *		allocated/initialized the ref first in which case the
 *		returned ref would be different than the passed-in
 *		new_ref. new_ref must be kfree'd by the caller in
 *		this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
					struct binder_proc *proc,
					struct binder_node *node,
					struct binder_ref *new_ref)
{
	struct binder_context *context = proc->context;
	struct rb_node **p = &proc->refs_by_node.rb_node;
	struct rb_node *parent = NULL;
	struct binder_ref *ref;
	struct rb_node *n;

	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_node);

		if (node < ref->node)
			p = &(*p)->rb_left;
		else if (node > ref->node)
			p = &(*p)->rb_right;
		else
			return ref;
	}
	if (!new_ref)
		return NULL;

	binder_stats_created(BINDER_STAT_REF);
	new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
	new_ref->proc = proc;
	new_ref->node = node;
	rb_link_node(&new_ref->rb_node_node, parent, p);
	rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

	new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		if (ref->data.desc > new_ref->data.desc)
			break;
		new_ref->data.desc = ref->data.desc + 1;
	}

	p = &proc->refs_by_desc.rb_node;
	while (*p) {
		parent = *p;
		ref = rb_entry(parent, struct binder_ref, rb_node_desc);

		if (new_ref->data.desc < ref->data.desc)
			p = &(*p)->rb_left;
		else if (new_ref->data.desc > ref->data.desc)
			p = &(*p)->rb_right;
		else
			BUG();
	}
	rb_link_node(&new_ref->rb_node_desc, parent, p);
	rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

	binder_node_lock(node);
	hlist_add_head(&new_ref->node_entry, &node->refs);

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d new ref %d desc %d for node %d\n",
		      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
		      node->debug_id);
	binder_node_unlock(node);
	return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
	bool delete_node = false;

	binder_debug(BINDER_DEBUG_INTERNAL_REFS,
		     "%d delete ref %d desc %d for node %d\n",
		      ref->proc->pid, ref->data.debug_id, ref->data.desc,
		      ref->node->debug_id);

	rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
	rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

	binder_node_inner_lock(ref->node);
	if (ref->data.strong)
		binder_dec_node_nilocked(ref->node, 1, 1);

	hlist_del(&ref->node_entry);
	delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
	binder_node_inner_unlock(ref->node);
	/*
	 * Clear ref->node unless we want the caller to free the node
	 */
	if (!delete_node) {
		/*
		 * The caller uses ref->node to determine
		 * whether the node needs to be freed. Clear
		 * it since the node is still alive.
		 */
		ref->node = NULL;
	}

	if (ref->death) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%d delete ref %d desc %d has death notification\n",
			      ref->proc->pid, ref->data.debug_id,
			      ref->data.desc);
		binder_dequeue_work(ref->proc, &ref->death->work);
		binder_stats_deleted(BINDER_STAT_DEATH);
	}
	binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:	 ref to be incremented
 * @strong:	 if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
				  struct list_head *target_list)
{
	int ret;

	if (strong) {
		if (ref->data.strong == 0) {
			ret = binder_inc_node(ref->node, 1, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.strong++;
	} else {
		if (ref->data.weak == 0) {
			ret = binder_inc_node(ref->node, 0, 1, target_list);
			if (ret)
				return ret;
		}
		ref->data.weak++;
	}
	return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:	ref to be decremented
 * @strong:	if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
	if (strong) {
		if (ref->data.strong == 0) {
			binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.strong--;
		if (ref->data.strong == 0)
			binder_dec_node(ref->node, strong, 1);
	} else {
		if (ref->data.weak == 0) {
			binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
					  ref->proc->pid, ref->data.debug_id,
					  ref->data.desc, ref->data.strong,
					  ref->data.weak);
			return false;
		}
		ref->data.weak--;
	}
	if (ref->data.strong == 0 && ref->data.weak == 0) {
		binder_cleanup_ref_olocked(ref);
		return true;
	}
	return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
		struct binder_proc *proc,
		u32 desc, bool need_strong_ref,
		struct binder_ref_data *rdata)
{
	struct binder_node *node;
	struct binder_ref *ref;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
	if (!ref)
		goto err_no_ref;
	node = ref->node;
	/*
	 * Take an implicit reference on the node to ensure
	 * it stays alive until the call to binder_put_node()
	 */
	binder_inc_node_tmpref(node);
	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	return node;

err_no_ref:
	binder_proc_unlock(proc);
	return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:	ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
	if (ref->node)
		binder_free_node(ref->node);
	kfree(ref->death);
	kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @increment:	true=inc reference, false=dec reference
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool increment, bool strong,
		struct binder_ref_data *rdata)
{
	int ret = 0;
	struct binder_ref *ref;
	bool delete_ref = false;

	binder_proc_lock(proc);
	ref = binder_get_ref_olocked(proc, desc, strong);
	if (!ref) {
		ret = -EINVAL;
		goto err_no_ref;
	}
	if (increment)
		ret = binder_inc_ref_olocked(ref, strong, NULL);
	else
		delete_ref = binder_dec_ref_olocked(ref, strong);

	if (rdata)
		*rdata = ref->data;
	binder_proc_unlock(proc);

	if (delete_ref)
		binder_free_ref(ref);
	return ret;

err_no_ref:
	binder_proc_unlock(proc);
	return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:	proc containing the ref
 * @desc:	the handle associated with the ref
 * @strong:	true=strong reference, false=weak reference
 * @rdata:	the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
		uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
	return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:	 proc containing the ref
 * @node:	 target node
 * @strong:	 true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:	 the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
			struct binder_node *node,
			bool strong,
			struct list_head *target_list,
			struct binder_ref_data *rdata)
{
	struct binder_ref *ref;
	struct binder_ref *new_ref = NULL;
	int ret = 0;

	binder_proc_lock(proc);
	ref = binder_get_ref_for_node_olocked(proc, node, NULL);
	if (!ref) {
		binder_proc_unlock(proc);
		new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
		if (!new_ref)
			return -ENOMEM;
		binder_proc_lock(proc);
		ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
	}
	ret = binder_inc_ref_olocked(ref, strong, target_list);
	*rdata = ref->data;
	binder_proc_unlock(proc);
	if (new_ref && ref != new_ref)
		/*
		 * Another thread created the ref first so
		 * free the one we allocated
		 */
		kfree(new_ref);
	return ret;
}

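/*
 * Illustrative sketch (assumed caller, mirroring how object translation
 * uses this helper): take a strong ref on @node for the target process
 * and learn the resulting handle from the returned ref data:
 *
 *	struct binder_ref_data rdata;
 *	int ret;
 *
 *	ret = binder_inc_ref_for_node(target_proc, node, true,
 *				      &thread->todo, &rdata);
 *	if (!ret)
 *		handle = rdata.desc;	// handle valid in target_proc
 */
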
static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
					   struct binder_transaction *t)
{
	BUG_ON(!target_thread);
	assert_spin_locked(&target_thread->proc->inner_lock);
	BUG_ON(target_thread->transaction_stack != t);
	BUG_ON(target_thread->transaction_stack->from != target_thread);
	target_thread->transaction_stack =
		target_thread->transaction_stack->from_parent;
	t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:	thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
	/*
	 * atomic is used to protect the counter value while
	 * it cannot reach zero or thread->is_dead is false
	 */
	binder_inner_proc_lock(thread->proc);
	atomic_dec(&thread->tmp_ref);
	if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
		binder_inner_proc_unlock(thread->proc);
		binder_free_thread(thread);
		return;
	}
	binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:	proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from)
		return NULL;
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	binder_thread_dec_tmpref(from);
	return NULL;
}

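/*
 * Illustrative sketch (assumed caller, following the kerneldoc above):
 *
 *	struct binder_thread *from = binder_get_txn_from_and_acq_inner(t);
 *
 *	if (from) {
 *		...			// from->proc->inner_lock is held
 *		binder_inner_proc_unlock(from->proc);
 *		binder_thread_dec_tmpref(from);
 *	}
 */
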
static void binder_free_transaction(struct binder_transaction *t)
{
	if (t->buffer)
		t->buffer->transaction = NULL;
	kfree(t);
	binder_stats_deleted(BINDER_STAT_TRANSACTION);
}

static void binder_send_failed_reply(struct binder_transaction *t,
				     uint32_t error_code)
{
	struct binder_thread *target_thread;
	struct binder_transaction *next;

	BUG_ON(t->flags & TF_ONE_WAY);
	while (1) {
		target_thread = binder_get_txn_from_and_acq_inner(t);
		if (target_thread) {
			binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
				     "send failed reply for transaction %d to %d:%d\n",
				      t->debug_id,
				      target_thread->proc->pid,
				      target_thread->pid);

			binder_pop_transaction_ilocked(target_thread, t);
			if (target_thread->reply_error.cmd == BR_OK) {
				target_thread->reply_error.cmd = error_code;
				binder_enqueue_work_ilocked(
					&target_thread->reply_error.work,
					&target_thread->todo);
				wake_up_interruptible(&target_thread->wait);
			} else {
				/*
				 * Cannot get here for normal operation, but
				 * we can if multiple synchronous transactions
				 * are sent without blocking for responses.
				 * Just ignore the 2nd error in this case.
				 */
				pr_warn("Unexpected reply error: %u\n",
					target_thread->reply_error.cmd);
			}
			binder_inner_proc_unlock(target_thread->proc);
			binder_thread_dec_tmpref(target_thread);
			binder_free_transaction(t);
			return;
		}
		next = t->from_parent;

		binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
			     "send failed reply for transaction %d, target dead\n",
			     t->debug_id);

		binder_free_transaction(t);
		if (next == NULL) {
			binder_debug(BINDER_DEBUG_DEAD_BINDER,
				     "reply failed, no target thread at root\n");
			return;
		}
		t = next;
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "reply failed, no target thread -- retry %d\n",
			      t->debug_id);
	}
}

/**
 * binder_cleanup_transaction() - cleans up undelivered transaction
 * @t:		transaction that needs to be cleaned up
 * @reason:	reason the transaction wasn't delivered
 * @error_code:	error to return to caller (if synchronous call)
 */
static void binder_cleanup_transaction(struct binder_transaction *t,
				       const char *reason,
				       uint32_t error_code)
{
	if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
		binder_send_failed_reply(t, error_code);
	} else {
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			"undelivered transaction %d, %s\n",
			t->debug_id, reason);
		binder_free_transaction(t);
	}
}

/**
 * binder_validate_object() - checks for a valid metadata object in a buffer.
 * @buffer:	binder_buffer that we're parsing.
 * @offset:	offset in the buffer at which to validate an object.
 *
 * Return:	If there's a valid metadata object at @offset in @buffer, the
 *		size of that object. Otherwise, it returns zero.
 */
static size_t binder_validate_object(struct binder_buffer *buffer, u64 offset)
{
	/* Check if we can read a header first */
	struct binder_object_header *hdr;
	size_t object_size = 0;

	if (offset > buffer->data_size - sizeof(*hdr) ||
	    buffer->data_size < sizeof(*hdr) ||
	    !IS_ALIGNED(offset, sizeof(u32)))
		return 0;

	/* Ok, now see if we can read a complete object. */
	hdr = (struct binder_object_header *)(buffer->data + offset);
	switch (hdr->type) {
	case BINDER_TYPE_BINDER:
	case BINDER_TYPE_WEAK_BINDER:
	case BINDER_TYPE_HANDLE:
	case BINDER_TYPE_WEAK_HANDLE:
		object_size = sizeof(struct flat_binder_object);
		break;
	case BINDER_TYPE_FD:
		object_size = sizeof(struct binder_fd_object);
		break;
	case BINDER_TYPE_PTR:
		object_size = sizeof(struct binder_buffer_object);
		break;
	case BINDER_TYPE_FDA:
		object_size = sizeof(struct binder_fd_array_object);
		break;
	default:
		return 0;
	}
	if (offset <= buffer->data_size - object_size &&
	    buffer->data_size >= object_size)
		return object_size;
	else
		return 0;
}

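/*
 * Illustrative sketch (assumed caller): users walk the offset array and
 * treat a zero return from binder_validate_object() as a malformed
 * object:
 *
 *	for (offp = off_start; offp < off_end; offp++) {
 *		size_t object_size = binder_validate_object(buffer, *offp);
 *
 *		if (object_size == 0)
 *			...	// reject or skip the bad object
 *	}
 */
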
/**
 * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
 * @b:		binder_buffer containing the object
 * @index:	index in offset array at which the binder_buffer_object is
 *		located
 * @start:	points to the start of the offset array
 * @num_valid:	the number of valid offsets in the offset array
 *
 * Return:	If @index is within the valid range of the offset array
 *		described by @start and @num_valid, and if there's a valid
 *		binder_buffer_object at the offset found in index @index
 *		of the offset array, that object is returned. Otherwise,
 *		%NULL is returned.
 *		Note that the offset found in index @index itself is not
 *		verified; this function assumes that @num_valid elements
 *		from @start were previously verified to have valid offsets.
 */
static struct binder_buffer_object *binder_validate_ptr(struct binder_buffer *b,
							binder_size_t index,
							binder_size_t *start,
							binder_size_t num_valid)
{
	struct binder_buffer_object *buffer_obj;
	binder_size_t *offp;

	if (index >= num_valid)
		return NULL;

	offp = start + index;
	buffer_obj = (struct binder_buffer_object *)(b->data + *offp);
	if (buffer_obj->hdr.type != BINDER_TYPE_PTR)
		return NULL;

	return buffer_obj;
}

2073 * binder_validate_fixup() - validates pointer/fd fixups happen in order.
2074 * @b: transaction buffer
2075 * @objects_start start of objects buffer
2076 * @buffer: binder_buffer_object in which to fix up
2077 * @offset: start offset in @buffer to fix up
2078 * @last_obj: last binder_buffer_object that we fixed up in
2079 * @last_min_offset: minimum fixup offset in @last_obj
2081 * Return: %true if a fixup in buffer @buffer at offset @offset is
2084 * For safety reasons, we only allow fixups inside a buffer to happen
2085 * at increasing offsets; additionally, we only allow fixup on the last
2086 * buffer object that was verified, or one of its parents.
2088 * Example of what is allowed:
2091 * B (parent = A, offset = 0)
2092 * C (parent = A, offset = 16)
2093 * D (parent = C, offset = 0)
2094 * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
2096 * Examples of what is not allowed:
2098 * Decreasing offsets within the same parent:
2100 * C (parent = A, offset = 16)
2101 * B (parent = A, offset = 0) // decreasing offset within A
2103 * Referring to a parent that wasn't the last object or any of its parents:
2105 * B (parent = A, offset = 0)
2106 * C (parent = A, offset = 0)
2107 * C (parent = A, offset = 16)
2108 * D (parent = B, offset = 0) // B is not A or any of A's parents
static bool binder_validate_fixup(struct binder_buffer *b,
				  binder_size_t *objects_start,
				  struct binder_buffer_object *buffer,
				  binder_size_t fixup_offset,
				  struct binder_buffer_object *last_obj,
				  binder_size_t last_min_offset)
{
	if (!last_obj) {
		/* Nothing to fix up in */
		return false;
	}

	while (last_obj != buffer) {
		/*
		 * Safe to retrieve the parent of last_obj, since it
		 * was already previously verified by the driver.
		 */
		if ((last_obj->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
			return false;
		last_min_offset = last_obj->parent_offset + sizeof(uintptr_t);
		last_obj = (struct binder_buffer_object *)
			(b->data + *(objects_start + last_obj->parent));
	}
	return (fixup_offset >= last_min_offset);
}
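/*
 * Illustrative sketch (compiled out): the ordering rule above reduced to
 * plain integers. Each fixup inside one parent must land at an offset at
 * or past the high-water mark left by the previous fixup; anything else
 * is rejected, exactly as in the A/B/C/D/E examples. Hypothetical,
 * example-only code.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool fixup_in_order(size_t fixup_offset, size_t *min_offset)
{
	if (fixup_offset < *min_offset)
		return false;		/* decreasing offset: rejected */
	/* a pointer was written here; the next fixup must start past it */
	*min_offset = fixup_offset + sizeof(uintptr_t);
	return true;
}

static void demo(void)
{
	size_t min = 0;

	assert(fixup_in_order(0, &min));	/* B at offset 0 */
	assert(fixup_in_order(16, &min));	/* C at offset 16 */
	assert(!fixup_in_order(8, &min));	/* would go backwards */
}
#endif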
2136 static void binder_transaction_buffer_release(struct binder_proc *proc,
2137 struct binder_buffer *buffer,
2138 binder_size_t *failed_at)
2140 binder_size_t *offp, *off_start, *off_end;
2141 int debug_id = buffer->debug_id;
2143 binder_debug(BINDER_DEBUG_TRANSACTION,
2144 "%d buffer release %d, size %zd-%zd, failed at %pK\n",
2145 proc->pid, buffer->debug_id,
2146 buffer->data_size, buffer->offsets_size, failed_at);
2148 if (buffer->target_node)
2149 binder_dec_node(buffer->target_node, 1, 0);
2151 off_start = (binder_size_t *)(buffer->data +
2152 ALIGN(buffer->data_size, sizeof(void *)));
2154 off_end = failed_at;
2156 off_end = (void *)off_start + buffer->offsets_size;
2157 for (offp = off_start; offp < off_end; offp++) {
2158 struct binder_object_header *hdr;
2159 size_t object_size = binder_validate_object(buffer, *offp);
2161 if (object_size == 0) {
2162 pr_err("transaction release %d bad object at offset %lld, size %zd\n",
2163 debug_id, (u64)*offp, buffer->data_size);
2166 hdr = (struct binder_object_header *)(buffer->data + *offp);
2167 switch (hdr->type) {
2168 case BINDER_TYPE_BINDER:
2169 case BINDER_TYPE_WEAK_BINDER: {
2170 struct flat_binder_object *fp;
2171 struct binder_node *node;
2173 fp = to_flat_binder_object(hdr);
2174 node = binder_get_node(proc, fp->binder);
2176 pr_err("transaction release %d bad node %016llx\n",
2177 debug_id, (u64)fp->binder);
2180 binder_debug(BINDER_DEBUG_TRANSACTION,
2181 " node %d u%016llx\n",
2182 node->debug_id, (u64)node->ptr);
2183 binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
2185 binder_put_node(node);
2187 case BINDER_TYPE_HANDLE:
2188 case BINDER_TYPE_WEAK_HANDLE: {
2189 struct flat_binder_object *fp;
2190 struct binder_ref_data rdata;
2193 fp = to_flat_binder_object(hdr);
2194 ret = binder_dec_ref_for_handle(proc, fp->handle,
2195 hdr->type == BINDER_TYPE_HANDLE, &rdata);
2198 pr_err("transaction release %d bad handle %d, ret = %d\n",
2199 debug_id, fp->handle, ret);
2202 binder_debug(BINDER_DEBUG_TRANSACTION,
2203 " ref %d desc %d\n",
2204 rdata.debug_id, rdata.desc);
2207 case BINDER_TYPE_FD: {
2208 struct binder_fd_object *fp = to_binder_fd_object(hdr);
2210 binder_debug(BINDER_DEBUG_TRANSACTION,
2211 " fd %d\n", fp->fd);
2213 task_close_fd(proc, fp->fd);
2215 case BINDER_TYPE_PTR:
			/*
			 * Nothing to do here, this will get cleaned up when the
			 * transaction buffer gets freed
			 */
			break;
2221 case BINDER_TYPE_FDA: {
2222 struct binder_fd_array_object *fda;
2223 struct binder_buffer_object *parent;
2224 uintptr_t parent_buffer;
2227 binder_size_t fd_buf_size;
2229 fda = to_binder_fd_array_object(hdr);
			parent = binder_validate_ptr(buffer, fda->parent,
						     off_start,
						     offp - off_start);
			if (!parent) {
				pr_err("transaction release %d bad parent offset\n",
				       debug_id);
				continue;
			}
2239 * Since the parent was already fixed up, convert it
2240 * back to kernel address space to access it
2242 parent_buffer = parent->buffer -
2243 binder_alloc_get_user_buffer_offset(
2246 fd_buf_size = sizeof(u32) * fda->num_fds;
2247 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2248 pr_err("transaction release %d invalid number of fds (%lld)\n",
2249 debug_id, (u64)fda->num_fds);
2252 if (fd_buf_size > parent->length ||
2253 fda->parent_offset > parent->length - fd_buf_size) {
2254 /* No space for all file descriptors here. */
2255 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2256 debug_id, (u64)fda->num_fds);
2259 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2260 for (fd_index = 0; fd_index < fda->num_fds; fd_index++)
2261 task_close_fd(proc, fd_array[fd_index]);
		} break;
		default:
			pr_err("transaction release %d bad object type %x\n",
			       debug_id, hdr->type);
			break;
		}
	}
}
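/*
 * Illustrative sketch (compiled out): the "failed_at" convention used by
 * binder_transaction_buffer_release() above. On a partially translated
 * buffer, only objects before the failure point own references or fds
 * and must be released; a NULL failure point releases everything. Toy
 * types, example-only.
 */
#if 0
#include <stddef.h>

static void example_release(const size_t *off_start, size_t n_offsets,
			    const size_t *failed_at,
			    void (*put_object)(size_t off))
{
	const size_t *off_end = failed_at ? failed_at : off_start + n_offsets;
	const size_t *offp;

	for (offp = off_start; offp < off_end; offp++)
		put_object(*offp);	/* drop the ref / close the fd */
}
#endif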
2271 static int binder_translate_binder(struct flat_binder_object *fp,
2272 struct binder_transaction *t,
2273 struct binder_thread *thread)
2275 struct binder_node *node;
2276 struct binder_proc *proc = thread->proc;
2277 struct binder_proc *target_proc = t->to_proc;
2278 struct binder_ref_data rdata;
2281 node = binder_get_node(proc, fp->binder);
2283 node = binder_new_node(proc, fp);
2287 if (fp->cookie != node->cookie) {
2288 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2289 proc->pid, thread->pid, (u64)fp->binder,
2290 node->debug_id, (u64)fp->cookie,
2295 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2300 ret = binder_inc_ref_for_node(target_proc, node,
2301 fp->hdr.type == BINDER_TYPE_BINDER,
2302 &thread->todo, &rdata);
2306 if (fp->hdr.type == BINDER_TYPE_BINDER)
2307 fp->hdr.type = BINDER_TYPE_HANDLE;
2309 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2311 fp->handle = rdata.desc;
2314 trace_binder_transaction_node_to_ref(t, node, &rdata);
2315 binder_debug(BINDER_DEBUG_TRANSACTION,
2316 " node %d u%016llx -> ref %d desc %d\n",
2317 node->debug_id, (u64)node->ptr,
2318 rdata.debug_id, rdata.desc);
done:
	binder_put_node(node);
	return ret;
}
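/*
 * Illustrative sketch (compiled out): what the translation above looks
 * like from user space. The sender embeds a BINDER_TYPE_BINDER object;
 * by the time the receiver sees the buffer, the driver has rewritten it
 * into a BINDER_TYPE_HANDLE whose handle indexes a ref in the receiving
 * process. Example-only; assumes the uapi binder header is available.
 */
#if 0
#include <linux/android/binder.h>
#include <stdint.h>
#include <string.h>

static void example_send_object(struct flat_binder_object *fbo,
				void *local_object)
{
	memset(fbo, 0, sizeof(*fbo));
	fbo->hdr.type = BINDER_TYPE_BINDER;	/* sender-local object... */
	fbo->binder = (binder_uintptr_t)(uintptr_t)local_object;
	fbo->cookie = (binder_uintptr_t)(uintptr_t)local_object;
	/* ...after translation, the receiver instead observes:
	 *   fbo->hdr.type == BINDER_TYPE_HANDLE
	 *   fbo->handle   == <desc chosen by the driver>
	 */
}
#endif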
2324 static int binder_translate_handle(struct flat_binder_object *fp,
2325 struct binder_transaction *t,
2326 struct binder_thread *thread)
2328 struct binder_proc *proc = thread->proc;
2329 struct binder_proc *target_proc = t->to_proc;
2330 struct binder_node *node;
2331 struct binder_ref_data src_rdata;
2334 node = binder_get_node_from_ref(proc, fp->handle,
2335 fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2337 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2338 proc->pid, thread->pid, fp->handle);
2341 if (security_binder_transfer_binder(proc->tsk, target_proc->tsk)) {
2346 binder_node_lock(node);
2347 if (node->proc == target_proc) {
2348 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2349 fp->hdr.type = BINDER_TYPE_BINDER;
2351 fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2352 fp->binder = node->ptr;
2353 fp->cookie = node->cookie;
2355 binder_inner_proc_lock(node->proc);
2356 binder_inc_node_nilocked(node,
2357 fp->hdr.type == BINDER_TYPE_BINDER,
2360 binder_inner_proc_unlock(node->proc);
2361 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2362 binder_debug(BINDER_DEBUG_TRANSACTION,
2363 " ref %d desc %d -> node %d u%016llx\n",
2364 src_rdata.debug_id, src_rdata.desc, node->debug_id,
2366 binder_node_unlock(node);
2368 struct binder_ref_data dest_rdata;
2370 binder_node_unlock(node);
2371 ret = binder_inc_ref_for_node(target_proc, node,
2372 fp->hdr.type == BINDER_TYPE_HANDLE,
2378 fp->handle = dest_rdata.desc;
2380 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2382 binder_debug(BINDER_DEBUG_TRANSACTION,
2383 " ref %d desc %d -> ref %d desc %d (node %d)\n",
2384 src_rdata.debug_id, src_rdata.desc,
2385 dest_rdata.debug_id, dest_rdata.desc,
done:
	binder_put_node(node);
	return ret;
}
2393 static int binder_translate_fd(int fd,
2394 struct binder_transaction *t,
2395 struct binder_thread *thread,
2396 struct binder_transaction *in_reply_to)
2398 struct binder_proc *proc = thread->proc;
2399 struct binder_proc *target_proc = t->to_proc;
2403 bool target_allows_fd;
2406 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2408 target_allows_fd = t->buffer->target_node->accept_fds;
2409 if (!target_allows_fd) {
2410 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2411 proc->pid, thread->pid,
2412 in_reply_to ? "reply" : "transaction",
2415 goto err_fd_not_accepted;
2420 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2421 proc->pid, thread->pid, fd);
2425 ret = security_binder_transfer_file(proc->tsk, target_proc->tsk, file);
2431 target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
2432 if (target_fd < 0) {
2434 goto err_get_unused_fd;
2436 task_fd_install(target_proc, target_fd, file);
2437 trace_binder_transaction_fd(t, fd, target_fd);
	binder_debug(BINDER_DEBUG_TRANSACTION, "        fd %d -> %d\n",
		     fd, target_fd);

	return target_fd;

err_get_unused_fd:
err_security:
	fput(file);
err_fget:
err_fd_not_accepted:
	return ret;
}
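/*
 * Illustrative sketch (compiled out): the fd translation above is
 * morally a cross-process dup(). A single-process userspace analogue
 * looks like this; the driver performs the same steps but installs the
 * file into the *target* task's descriptor table, so the new number is
 * only meaningful there. Example-only.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int example_translate_fd(int src_fd)
{
	/* validate the fd and take a new reference on the open file */
	int dst_fd = fcntl(src_fd, F_DUPFD_CLOEXEC, 0);

	/* dst_fd names the same open file under a new descriptor,
	 * mirroring target_fd in the driver; < 0 on error. */
	return dst_fd;
}
#endif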
2451 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2452 struct binder_buffer_object *parent,
2453 struct binder_transaction *t,
2454 struct binder_thread *thread,
2455 struct binder_transaction *in_reply_to)
2457 binder_size_t fdi, fd_buf_size, num_installed_fds;
2459 uintptr_t parent_buffer;
2461 struct binder_proc *proc = thread->proc;
2462 struct binder_proc *target_proc = t->to_proc;
2464 fd_buf_size = sizeof(u32) * fda->num_fds;
2465 if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2466 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2467 proc->pid, thread->pid, (u64)fda->num_fds);
2470 if (fd_buf_size > parent->length ||
2471 fda->parent_offset > parent->length - fd_buf_size) {
2472 /* No space for all file descriptors here. */
2473 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2474 proc->pid, thread->pid, (u64)fda->num_fds);
2478 * Since the parent was already fixed up, convert it
2479 * back to the kernel address space to access it
2481 parent_buffer = parent->buffer -
2482 binder_alloc_get_user_buffer_offset(&target_proc->alloc);
2483 fd_array = (u32 *)(parent_buffer + (uintptr_t)fda->parent_offset);
2484 if (!IS_ALIGNED((unsigned long)fd_array, sizeof(u32))) {
2485 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2486 proc->pid, thread->pid);
2489 for (fdi = 0; fdi < fda->num_fds; fdi++) {
		target_fd = binder_translate_fd(fd_array[fdi], t, thread,
						in_reply_to);
		if (target_fd < 0)
			goto err_translate_fd_failed;
		fd_array[fdi] = target_fd;
	}
	return 0;

err_translate_fd_failed:
	/*
	 * Failed to allocate fd or security error, free fds
	 * installed so far.
	 */
	num_installed_fds = fdi;
	for (fdi = 0; fdi < num_installed_fds; fdi++)
		task_close_fd(target_proc, fd_array[fdi]);
	return target_fd;
}
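/*
 * Illustrative sketch (compiled out): the overflow guard used above.
 * num_fds * sizeof(u32) can wrap on a 32-bit size_t, so the bound is
 * checked by division before the multiply, and the offset check is
 * phrased to avoid underflow. Example-only.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool example_fd_array_fits(uint64_t num_fds, size_t parent_len,
				  size_t parent_off)
{
	size_t fd_buf_size;

	if (num_fds >= SIZE_MAX / sizeof(uint32_t))
		return false;			/* multiply would overflow */
	fd_buf_size = (size_t)num_fds * sizeof(uint32_t);
	if (fd_buf_size > parent_len)
		return false;			/* array larger than parent */
	if (parent_off > parent_len - fd_buf_size)
		return false;			/* array runs off the end */
	return true;
}
#endif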
2509 static int binder_fixup_parent(struct binder_transaction *t,
2510 struct binder_thread *thread,
2511 struct binder_buffer_object *bp,
2512 binder_size_t *off_start,
2513 binder_size_t num_valid,
2514 struct binder_buffer_object *last_fixup_obj,
2515 binder_size_t last_fixup_min_off)
2517 struct binder_buffer_object *parent;
2519 struct binder_buffer *b = t->buffer;
2520 struct binder_proc *proc = thread->proc;
2521 struct binder_proc *target_proc = t->to_proc;
2523 if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2526 parent = binder_validate_ptr(b, bp->parent, off_start, num_valid);
2528 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2529 proc->pid, thread->pid);
2533 if (!binder_validate_fixup(b, off_start,
2534 parent, bp->parent_offset,
2536 last_fixup_min_off)) {
2537 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2538 proc->pid, thread->pid);
2542 if (parent->length < sizeof(binder_uintptr_t) ||
2543 bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2544 /* No space for a pointer here! */
2545 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2546 proc->pid, thread->pid);
2549 parent_buffer = (u8 *)((uintptr_t)parent->buffer -
2550 binder_alloc_get_user_buffer_offset(
2551 &target_proc->alloc));
	*(binder_uintptr_t *)(parent_buffer + bp->parent_offset) = bp->buffer;

	return 0;
}
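/*
 * Illustrative sketch (compiled out): the parent fixup above on plain
 * memory. A parent blob carries an embedded pointer at parent_offset;
 * once the child blob has been copied to its final location, that slot
 * is rewritten to the child's new address. Example-only.
 */
#if 0
#include <stdint.h>
#include <string.h>

static void example_fixup_parent(uint8_t *parent_blob, size_t parent_offset,
				 const void *child_at_target)
{
	uintptr_t fixed = (uintptr_t)child_at_target;

	/* equivalent of: *(binder_uintptr_t *)(parent + off) = bp->buffer */
	memcpy(parent_blob + parent_offset, &fixed, sizeof(fixed));
}
#endif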
/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	true if the transaction was successfully queued
 *		false if the target process or thread is dead
 */
2574 static bool binder_proc_transaction(struct binder_transaction *t,
2575 struct binder_proc *proc,
2576 struct binder_thread *thread)
2578 struct list_head *target_list = NULL;
2579 struct binder_node *node = t->buffer->target_node;
2580 bool oneway = !!(t->flags & TF_ONE_WAY);
2584 binder_node_lock(node);
2587 if (node->has_async_transaction) {
2588 target_list = &node->async_todo;
2591 node->has_async_transaction = 1;
2595 binder_inner_proc_lock(proc);
2597 if (proc->is_dead || (thread && thread->is_dead)) {
2598 binder_inner_proc_unlock(proc);
2599 binder_node_unlock(node);
2603 if (!thread && !target_list)
2604 thread = binder_select_thread_ilocked(proc);
2607 target_list = &thread->todo;
2608 else if (!target_list)
2609 target_list = &proc->todo;
2611 BUG_ON(target_list != &node->async_todo);
2613 binder_enqueue_work_ilocked(&t->work, target_list);
2616 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2618 binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	return true;
}
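/*
 * Illustrative sketch (compiled out): the queue-selection policy above,
 * minus the locking. Pending async work pins oneway transactions to the
 * node's async_todo; otherwise an idle thread's todo list is preferred
 * and proc->todo is the fallback. Toy types, example-only.
 */
#if 0
#include <stdbool.h>

enum toy_list { TOY_THREAD_TODO, TOY_PROC_TODO, TOY_ASYNC_TODO };

static enum toy_list example_pick_queue(bool oneway, bool node_has_async,
					bool have_explicit_thread,
					bool idle_thread_available)
{
	if (oneway && node_has_async)
		return TOY_ASYNC_TODO;	/* serialize behind pending async work */
	if (have_explicit_thread || idle_thread_available)
		return TOY_THREAD_TODO;	/* hand straight to a waiting thread */
	return TOY_PROC_TODO;		/* any looper may pick it up later */
}
#endif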
/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	if no @node->proc then returns BR_DEAD_REPLY
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
 * Also sets @procp if valid. If @node->proc is NULL, indicating that the
 * target proc has died, @error is set to BR_DEAD_REPLY.
 */
2645 static struct binder_node *binder_get_node_refs_for_txn(
2646 struct binder_node *node,
2647 struct binder_proc **procp,
2650 struct binder_node *target_node = NULL;
2652 binder_node_inner_lock(node);
2655 binder_inc_node_nilocked(node, 1, 0, NULL);
2656 binder_inc_node_tmpref_ilocked(node);
2657 node->proc->tmp_ref++;
2658 *procp = node->proc;
2660 *error = BR_DEAD_REPLY;
2661 binder_node_inner_unlock(node);
2666 static void binder_transaction(struct binder_proc *proc,
2667 struct binder_thread *thread,
2668 struct binder_transaction_data *tr, int reply,
2669 binder_size_t extra_buffers_size)
2672 struct binder_transaction *t;
2673 struct binder_work *tcomplete;
2674 binder_size_t *offp, *off_end, *off_start;
2675 binder_size_t off_min;
2676 u8 *sg_bufp, *sg_buf_end;
2677 struct binder_proc *target_proc = NULL;
2678 struct binder_thread *target_thread = NULL;
2679 struct binder_node *target_node = NULL;
2680 struct binder_transaction *in_reply_to = NULL;
2681 struct binder_transaction_log_entry *e;
2682 uint32_t return_error = 0;
2683 uint32_t return_error_param = 0;
2684 uint32_t return_error_line = 0;
2685 struct binder_buffer_object *last_fixup_obj = NULL;
2686 binder_size_t last_fixup_min_off = 0;
2687 struct binder_context *context = proc->context;
2688 int t_debug_id = atomic_inc_return(&binder_last_id);
2690 e = binder_transaction_log_add(&binder_transaction_log);
2691 e->debug_id = t_debug_id;
2692 e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2693 e->from_proc = proc->pid;
2694 e->from_thread = thread->pid;
2695 e->target_handle = tr->target.handle;
2696 e->data_size = tr->data_size;
2697 e->offsets_size = tr->offsets_size;
2698 e->context_name = proc->context->name;
2701 binder_inner_proc_lock(proc);
2702 in_reply_to = thread->transaction_stack;
2703 if (in_reply_to == NULL) {
2704 binder_inner_proc_unlock(proc);
2705 binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2706 proc->pid, thread->pid);
2707 return_error = BR_FAILED_REPLY;
2708 return_error_param = -EPROTO;
2709 return_error_line = __LINE__;
2710 goto err_empty_call_stack;
2712 if (in_reply_to->to_thread != thread) {
2713 spin_lock(&in_reply_to->lock);
2714 binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2715 proc->pid, thread->pid, in_reply_to->debug_id,
2716 in_reply_to->to_proc ?
2717 in_reply_to->to_proc->pid : 0,
2718 in_reply_to->to_thread ?
2719 in_reply_to->to_thread->pid : 0);
2720 spin_unlock(&in_reply_to->lock);
2721 binder_inner_proc_unlock(proc);
2722 return_error = BR_FAILED_REPLY;
2723 return_error_param = -EPROTO;
2724 return_error_line = __LINE__;
2726 goto err_bad_call_stack;
2728 thread->transaction_stack = in_reply_to->to_parent;
2729 binder_inner_proc_unlock(proc);
2730 binder_set_nice(in_reply_to->saved_priority);
2731 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2732 if (target_thread == NULL) {
2733 return_error = BR_DEAD_REPLY;
2734 return_error_line = __LINE__;
2735 goto err_dead_binder;
2737 if (target_thread->transaction_stack != in_reply_to) {
2738 binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2739 proc->pid, thread->pid,
2740 target_thread->transaction_stack ?
2741 target_thread->transaction_stack->debug_id : 0,
2742 in_reply_to->debug_id);
2743 binder_inner_proc_unlock(target_thread->proc);
2744 return_error = BR_FAILED_REPLY;
2745 return_error_param = -EPROTO;
2746 return_error_line = __LINE__;
2748 target_thread = NULL;
2749 goto err_dead_binder;
2751 target_proc = target_thread->proc;
2752 target_proc->tmp_ref++;
2753 binder_inner_proc_unlock(target_thread->proc);
2755 if (tr->target.handle) {
2756 struct binder_ref *ref;
		/*
		 * There must already be a strong ref
		 * on this node. If so, do a strong
		 * increment on the node to ensure it
		 * stays alive until the transaction is
		 * done.
		 */
2765 binder_proc_lock(proc);
2766 ref = binder_get_ref_olocked(proc, tr->target.handle,
2769 target_node = binder_get_node_refs_for_txn(
2770 ref->node, &target_proc,
2773 binder_user_error("%d:%d got transaction to invalid handle\n",
2774 proc->pid, thread->pid);
2775 return_error = BR_FAILED_REPLY;
2777 binder_proc_unlock(proc);
2779 mutex_lock(&context->context_mgr_node_lock);
2780 target_node = context->binder_context_mgr_node;
2782 target_node = binder_get_node_refs_for_txn(
2783 target_node, &target_proc,
2786 return_error = BR_DEAD_REPLY;
2787 mutex_unlock(&context->context_mgr_node_lock);
2788 if (target_node && target_proc == proc) {
2789 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2790 proc->pid, thread->pid);
2791 return_error = BR_FAILED_REPLY;
2792 return_error_param = -EINVAL;
2793 return_error_line = __LINE__;
2794 goto err_invalid_target_handle;
		}
	}
	if (!target_node) {
		/*
		 * return_error is set above
		 */
		return_error_param = -EINVAL;
		return_error_line = __LINE__;
		goto err_dead_binder;
	}
2805 e->to_node = target_node->debug_id;
2806 if (security_binder_transaction(proc->tsk,
2807 target_proc->tsk) < 0) {
2808 return_error = BR_FAILED_REPLY;
2809 return_error_param = -EPERM;
2810 return_error_line = __LINE__;
2811 goto err_invalid_target_handle;
2813 binder_inner_proc_lock(proc);
2814 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2815 struct binder_transaction *tmp;
2817 tmp = thread->transaction_stack;
2818 if (tmp->to_thread != thread) {
2819 spin_lock(&tmp->lock);
2820 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2821 proc->pid, thread->pid, tmp->debug_id,
2822 tmp->to_proc ? tmp->to_proc->pid : 0,
2824 tmp->to_thread->pid : 0);
2825 spin_unlock(&tmp->lock);
2826 binder_inner_proc_unlock(proc);
2827 return_error = BR_FAILED_REPLY;
2828 return_error_param = -EPROTO;
2829 return_error_line = __LINE__;
2830 goto err_bad_call_stack;
2833 struct binder_thread *from;
2835 spin_lock(&tmp->lock);
2837 if (from && from->proc == target_proc) {
2838 atomic_inc(&from->tmp_ref);
2839 target_thread = from;
2840 spin_unlock(&tmp->lock);
2843 spin_unlock(&tmp->lock);
2844 tmp = tmp->from_parent;
2847 binder_inner_proc_unlock(proc);
2850 e->to_thread = target_thread->pid;
2851 e->to_proc = target_proc->pid;
2853 /* TODO: reuse incoming transaction for reply */
2854 t = kzalloc(sizeof(*t), GFP_KERNEL);
2856 return_error = BR_FAILED_REPLY;
2857 return_error_param = -ENOMEM;
2858 return_error_line = __LINE__;
2859 goto err_alloc_t_failed;
2861 binder_stats_created(BINDER_STAT_TRANSACTION);
2862 spin_lock_init(&t->lock);
2864 tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2865 if (tcomplete == NULL) {
2866 return_error = BR_FAILED_REPLY;
2867 return_error_param = -ENOMEM;
2868 return_error_line = __LINE__;
2869 goto err_alloc_tcomplete_failed;
2871 binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2873 t->debug_id = t_debug_id;
2876 binder_debug(BINDER_DEBUG_TRANSACTION,
2877 "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2878 proc->pid, thread->pid, t->debug_id,
2879 target_proc->pid, target_thread->pid,
2880 (u64)tr->data.ptr.buffer,
2881 (u64)tr->data.ptr.offsets,
2882 (u64)tr->data_size, (u64)tr->offsets_size,
2883 (u64)extra_buffers_size);
2885 binder_debug(BINDER_DEBUG_TRANSACTION,
2886 "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2887 proc->pid, thread->pid, t->debug_id,
2888 target_proc->pid, target_node->debug_id,
2889 (u64)tr->data.ptr.buffer,
2890 (u64)tr->data.ptr.offsets,
2891 (u64)tr->data_size, (u64)tr->offsets_size,
2892 (u64)extra_buffers_size);
2894 if (!reply && !(tr->flags & TF_ONE_WAY))
2898 t->sender_euid = task_euid(proc->tsk);
2899 t->to_proc = target_proc;
2900 t->to_thread = target_thread;
2902 t->flags = tr->flags;
2903 t->priority = task_nice(current);
2905 trace_binder_transaction(reply, t, target_node);
2907 t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2908 tr->offsets_size, extra_buffers_size,
2909 !reply && (t->flags & TF_ONE_WAY));
2910 if (IS_ERR(t->buffer)) {
2912 * -ESRCH indicates VMA cleared. The target is dying.
2914 return_error_param = PTR_ERR(t->buffer);
2915 return_error = return_error_param == -ESRCH ?
2916 BR_DEAD_REPLY : BR_FAILED_REPLY;
2917 return_error_line = __LINE__;
2919 goto err_binder_alloc_buf_failed;
2921 t->buffer->debug_id = t->debug_id;
2922 t->buffer->transaction = t;
2923 t->buffer->target_node = target_node;
2924 trace_binder_transaction_alloc_buf(t->buffer);
2925 off_start = (binder_size_t *)(t->buffer->data +
2926 ALIGN(tr->data_size, sizeof(void *)));
2929 if (copy_from_user(t->buffer->data, (const void __user *)(uintptr_t)
2930 tr->data.ptr.buffer, tr->data_size)) {
2931 binder_user_error("%d:%d got transaction with invalid data ptr\n",
2932 proc->pid, thread->pid);
2933 return_error = BR_FAILED_REPLY;
2934 return_error_param = -EFAULT;
2935 return_error_line = __LINE__;
2936 goto err_copy_data_failed;
2938 if (copy_from_user(offp, (const void __user *)(uintptr_t)
2939 tr->data.ptr.offsets, tr->offsets_size)) {
2940 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2941 proc->pid, thread->pid);
2942 return_error = BR_FAILED_REPLY;
2943 return_error_param = -EFAULT;
2944 return_error_line = __LINE__;
2945 goto err_copy_data_failed;
2947 if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2948 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2949 proc->pid, thread->pid, (u64)tr->offsets_size);
2950 return_error = BR_FAILED_REPLY;
2951 return_error_param = -EINVAL;
2952 return_error_line = __LINE__;
2953 goto err_bad_offset;
2955 if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2956 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2957 proc->pid, thread->pid,
2958 (u64)extra_buffers_size);
2959 return_error = BR_FAILED_REPLY;
2960 return_error_param = -EINVAL;
2961 return_error_line = __LINE__;
2962 goto err_bad_offset;
2964 off_end = (void *)off_start + tr->offsets_size;
2965 sg_bufp = (u8 *)(PTR_ALIGN(off_end, sizeof(void *)));
2966 sg_buf_end = sg_bufp + extra_buffers_size;
2968 for (; offp < off_end; offp++) {
2969 struct binder_object_header *hdr;
2970 size_t object_size = binder_validate_object(t->buffer, *offp);
2972 if (object_size == 0 || *offp < off_min) {
2973 binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2974 proc->pid, thread->pid, (u64)*offp,
2976 (u64)t->buffer->data_size);
2977 return_error = BR_FAILED_REPLY;
2978 return_error_param = -EINVAL;
2979 return_error_line = __LINE__;
2980 goto err_bad_offset;
2983 hdr = (struct binder_object_header *)(t->buffer->data + *offp);
2984 off_min = *offp + object_size;
2985 switch (hdr->type) {
2986 case BINDER_TYPE_BINDER:
2987 case BINDER_TYPE_WEAK_BINDER: {
2988 struct flat_binder_object *fp;
2990 fp = to_flat_binder_object(hdr);
2991 ret = binder_translate_binder(fp, t, thread);
2993 return_error = BR_FAILED_REPLY;
2994 return_error_param = ret;
2995 return_error_line = __LINE__;
2996 goto err_translate_failed;
2999 case BINDER_TYPE_HANDLE:
3000 case BINDER_TYPE_WEAK_HANDLE: {
3001 struct flat_binder_object *fp;
3003 fp = to_flat_binder_object(hdr);
3004 ret = binder_translate_handle(fp, t, thread);
3006 return_error = BR_FAILED_REPLY;
3007 return_error_param = ret;
3008 return_error_line = __LINE__;
3009 goto err_translate_failed;
3013 case BINDER_TYPE_FD: {
3014 struct binder_fd_object *fp = to_binder_fd_object(hdr);
3015 int target_fd = binder_translate_fd(fp->fd, t, thread,
3018 if (target_fd < 0) {
3019 return_error = BR_FAILED_REPLY;
3020 return_error_param = target_fd;
3021 return_error_line = __LINE__;
3022 goto err_translate_failed;
3027 case BINDER_TYPE_FDA: {
3028 struct binder_fd_array_object *fda =
3029 to_binder_fd_array_object(hdr);
3030 struct binder_buffer_object *parent =
3031 binder_validate_ptr(t->buffer, fda->parent,
3035 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3036 proc->pid, thread->pid);
3037 return_error = BR_FAILED_REPLY;
3038 return_error_param = -EINVAL;
3039 return_error_line = __LINE__;
3040 goto err_bad_parent;
3042 if (!binder_validate_fixup(t->buffer, off_start,
3043 parent, fda->parent_offset,
3045 last_fixup_min_off)) {
3046 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3047 proc->pid, thread->pid);
3048 return_error = BR_FAILED_REPLY;
3049 return_error_param = -EINVAL;
3050 return_error_line = __LINE__;
3051 goto err_bad_parent;
3053 ret = binder_translate_fd_array(fda, parent, t, thread,
3056 return_error = BR_FAILED_REPLY;
3057 return_error_param = ret;
3058 return_error_line = __LINE__;
3059 goto err_translate_failed;
3061 last_fixup_obj = parent;
3062 last_fixup_min_off =
3063 fda->parent_offset + sizeof(u32) * fda->num_fds;
3065 case BINDER_TYPE_PTR: {
3066 struct binder_buffer_object *bp =
3067 to_binder_buffer_object(hdr);
3068 size_t buf_left = sg_buf_end - sg_bufp;
3070 if (bp->length > buf_left) {
3071 binder_user_error("%d:%d got transaction with too large buffer\n",
3072 proc->pid, thread->pid);
3073 return_error = BR_FAILED_REPLY;
3074 return_error_param = -EINVAL;
3075 return_error_line = __LINE__;
3076 goto err_bad_offset;
3078 if (copy_from_user(sg_bufp,
3079 (const void __user *)(uintptr_t)
3080 bp->buffer, bp->length)) {
3081 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3082 proc->pid, thread->pid);
3083 return_error_param = -EFAULT;
3084 return_error = BR_FAILED_REPLY;
3085 return_error_line = __LINE__;
3086 goto err_copy_data_failed;
3088 /* Fixup buffer pointer to target proc address space */
3089 bp->buffer = (uintptr_t)sg_bufp +
3090 binder_alloc_get_user_buffer_offset(
3091 &target_proc->alloc);
3092 sg_bufp += ALIGN(bp->length, sizeof(u64));
3094 ret = binder_fixup_parent(t, thread, bp, off_start,
3097 last_fixup_min_off);
3099 return_error = BR_FAILED_REPLY;
3100 return_error_param = ret;
3101 return_error_line = __LINE__;
3102 goto err_translate_failed;
3104 last_fixup_obj = bp;
3105 last_fixup_min_off = 0;
3108 binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3109 proc->pid, thread->pid, hdr->type);
3110 return_error = BR_FAILED_REPLY;
3111 return_error_param = -EINVAL;
3112 return_error_line = __LINE__;
3113 goto err_bad_object_type;
3116 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3117 binder_enqueue_work(proc, tcomplete, &thread->todo);
3118 t->work.type = BINDER_WORK_TRANSACTION;
3121 binder_inner_proc_lock(target_proc);
3122 if (target_thread->is_dead) {
3123 binder_inner_proc_unlock(target_proc);
3124 goto err_dead_proc_or_thread;
3126 BUG_ON(t->buffer->async_transaction != 0);
3127 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3128 binder_enqueue_work_ilocked(&t->work, &target_thread->todo);
3129 binder_inner_proc_unlock(target_proc);
3130 wake_up_interruptible_sync(&target_thread->wait);
3131 binder_free_transaction(in_reply_to);
3132 } else if (!(t->flags & TF_ONE_WAY)) {
3133 BUG_ON(t->buffer->async_transaction != 0);
3134 binder_inner_proc_lock(proc);
3136 t->from_parent = thread->transaction_stack;
3137 thread->transaction_stack = t;
3138 binder_inner_proc_unlock(proc);
3139 if (!binder_proc_transaction(t, target_proc, target_thread)) {
3140 binder_inner_proc_lock(proc);
3141 binder_pop_transaction_ilocked(thread, t);
3142 binder_inner_proc_unlock(proc);
3143 goto err_dead_proc_or_thread;
3146 BUG_ON(target_node == NULL);
3147 BUG_ON(t->buffer->async_transaction != 1);
3148 if (!binder_proc_transaction(t, target_proc, NULL))
3149 goto err_dead_proc_or_thread;
3152 binder_thread_dec_tmpref(target_thread);
3153 binder_proc_dec_tmpref(target_proc);
3155 binder_dec_node_tmpref(target_node);
	/*
	 * write barrier to synchronize with initialization
	 * of log entry
	 */
	smp_wmb();
	WRITE_ONCE(e->debug_id_done, t_debug_id);
3164 err_dead_proc_or_thread:
3165 return_error = BR_DEAD_REPLY;
3166 return_error_line = __LINE__;
3167 binder_dequeue_work(proc, tcomplete);
3168 err_translate_failed:
3169 err_bad_object_type:
3172 err_copy_data_failed:
3173 trace_binder_transaction_failed_buffer_release(t->buffer);
3174 binder_transaction_buffer_release(target_proc, t->buffer, offp);
3176 binder_dec_node_tmpref(target_node);
3178 t->buffer->transaction = NULL;
3179 binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3180 err_binder_alloc_buf_failed:
3182 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3183 err_alloc_tcomplete_failed:
3185 binder_stats_deleted(BINDER_STAT_TRANSACTION);
3188 err_empty_call_stack:
3190 err_invalid_target_handle:
3192 binder_thread_dec_tmpref(target_thread);
3194 binder_proc_dec_tmpref(target_proc);
3196 binder_dec_node(target_node, 1, 0);
3197 binder_dec_node_tmpref(target_node);
3200 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3201 "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3202 proc->pid, thread->pid, return_error, return_error_param,
3203 (u64)tr->data_size, (u64)tr->offsets_size,
3207 struct binder_transaction_log_entry *fe;
3209 e->return_error = return_error;
3210 e->return_error_param = return_error_param;
3211 e->return_error_line = return_error_line;
3212 fe = binder_transaction_log_add(&binder_transaction_log_failed);
		/*
		 * write barrier to synchronize with initialization
		 * of log entry
		 */
		smp_wmb();
		WRITE_ONCE(e->debug_id_done, t_debug_id);
		WRITE_ONCE(fe->debug_id_done, t_debug_id);
	BUG_ON(thread->return_error.cmd != BR_OK);
	if (in_reply_to) {
		thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
		binder_send_failed_reply(in_reply_to, return_error);
	} else {
		thread->return_error.cmd = return_error;
		binder_enqueue_work(thread->proc,
				    &thread->return_error.work,
				    &thread->todo);
	}
}
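/*
 * Illustrative sketch (compiled out): what drives binder_transaction()
 * from user space. A client packs a BC_TRANSACTION code followed by a
 * binder_transaction_data into the write buffer of BINDER_WRITE_READ;
 * the error paths above surface as BR_FAILED_REPLY/BR_DEAD_REPLY in the
 * read buffer. Example-only; the fd and handle values are hypothetical.
 */
#if 0
#include <linux/android/binder.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

static int example_one_way(int binder_fd, uint32_t handle)
{
	struct {
		uint32_t cmd;
		struct binder_transaction_data tr;
	} __attribute__((packed)) w;
	struct binder_write_read bwr;

	memset(&w, 0, sizeof(w));
	w.cmd = BC_TRANSACTION;
	w.tr.target.handle = handle;
	w.tr.flags = TF_ONE_WAY;	/* no reply expected */
	/* zero-sized payload: data_size and offsets_size stay 0 */

	memset(&bwr, 0, sizeof(bwr));
	bwr.write_size = sizeof(w);
	bwr.write_buffer = (binder_uintptr_t)(uintptr_t)&w;

	return ioctl(binder_fd, BINDER_WRITE_READ, &bwr);
}
#endif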
3238 static int binder_thread_write(struct binder_proc *proc,
3239 struct binder_thread *thread,
3240 binder_uintptr_t binder_buffer, size_t size,
3241 binder_size_t *consumed)
3244 struct binder_context *context = proc->context;
3245 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3246 void __user *ptr = buffer + *consumed;
3247 void __user *end = buffer + size;
3249 while (ptr < end && thread->return_error.cmd == BR_OK) {
3252 if (get_user(cmd, (uint32_t __user *)ptr))
3254 ptr += sizeof(uint32_t);
3255 trace_binder_command(cmd);
3256 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3257 atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3258 atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3259 atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3267 const char *debug_string;
3268 bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3269 bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3270 struct binder_ref_data rdata;
3272 if (get_user(target, (uint32_t __user *)ptr))
3275 ptr += sizeof(uint32_t);
3277 if (increment && !target) {
3278 struct binder_node *ctx_mgr_node;
3279 mutex_lock(&context->context_mgr_node_lock);
3280 ctx_mgr_node = context->binder_context_mgr_node;
3282 ret = binder_inc_ref_for_node(
3284 strong, NULL, &rdata);
3285 mutex_unlock(&context->context_mgr_node_lock);
3288 ret = binder_update_ref_for_handle(
3289 proc, target, increment, strong,
3291 if (!ret && rdata.desc != target) {
3292 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3293 proc->pid, thread->pid,
3294 target, rdata.desc);
3298 debug_string = "IncRefs";
3301 debug_string = "Acquire";
3304 debug_string = "Release";
3308 debug_string = "DecRefs";
3312 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3313 proc->pid, thread->pid, debug_string,
3314 strong, target, ret);
3317 binder_debug(BINDER_DEBUG_USER_REFS,
3318 "%d:%d %s ref %d desc %d s %d w %d\n",
3319 proc->pid, thread->pid, debug_string,
3320 rdata.debug_id, rdata.desc, rdata.strong,
3324 case BC_INCREFS_DONE:
3325 case BC_ACQUIRE_DONE: {
3326 binder_uintptr_t node_ptr;
3327 binder_uintptr_t cookie;
3328 struct binder_node *node;
3331 if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3333 ptr += sizeof(binder_uintptr_t);
3334 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3336 ptr += sizeof(binder_uintptr_t);
3337 node = binder_get_node(proc, node_ptr);
3339 binder_user_error("%d:%d %s u%016llx no match\n",
3340 proc->pid, thread->pid,
3341 cmd == BC_INCREFS_DONE ?
3347 if (cookie != node->cookie) {
3348 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3349 proc->pid, thread->pid,
3350 cmd == BC_INCREFS_DONE ?
3351 "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3352 (u64)node_ptr, node->debug_id,
3353 (u64)cookie, (u64)node->cookie);
3354 binder_put_node(node);
3357 binder_node_inner_lock(node);
3358 if (cmd == BC_ACQUIRE_DONE) {
3359 if (node->pending_strong_ref == 0) {
3360 binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3361 proc->pid, thread->pid,
3363 binder_node_inner_unlock(node);
3364 binder_put_node(node);
3367 node->pending_strong_ref = 0;
3369 if (node->pending_weak_ref == 0) {
3370 binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3371 proc->pid, thread->pid,
3373 binder_node_inner_unlock(node);
3374 binder_put_node(node);
3377 node->pending_weak_ref = 0;
3379 free_node = binder_dec_node_nilocked(node,
3380 cmd == BC_ACQUIRE_DONE, 0);
3382 binder_debug(BINDER_DEBUG_USER_REFS,
3383 "%d:%d %s node %d ls %d lw %d tr %d\n",
3384 proc->pid, thread->pid,
3385 cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3386 node->debug_id, node->local_strong_refs,
3387 node->local_weak_refs, node->tmp_refs);
3388 binder_node_inner_unlock(node);
3389 binder_put_node(node);
3392 case BC_ATTEMPT_ACQUIRE:
3393 pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3395 case BC_ACQUIRE_RESULT:
3396 pr_err("BC_ACQUIRE_RESULT not supported\n");
3399 case BC_FREE_BUFFER: {
3400 binder_uintptr_t data_ptr;
3401 struct binder_buffer *buffer;
3403 if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3405 ptr += sizeof(binder_uintptr_t);
3407 buffer = binder_alloc_prepare_to_free(&proc->alloc,
3409 if (IS_ERR_OR_NULL(buffer)) {
3410 if (PTR_ERR(buffer) == -EPERM) {
3412 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3413 proc->pid, thread->pid,
3417 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3418 proc->pid, thread->pid,
3423 binder_debug(BINDER_DEBUG_FREE_BUFFER,
3424 "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3425 proc->pid, thread->pid, (u64)data_ptr,
3427 buffer->transaction ? "active" : "finished");
3429 if (buffer->transaction) {
3430 buffer->transaction->buffer = NULL;
3431 buffer->transaction = NULL;
3433 if (buffer->async_transaction && buffer->target_node) {
3434 struct binder_node *buf_node;
3435 struct binder_work *w;
3437 buf_node = buffer->target_node;
3438 binder_node_inner_lock(buf_node);
3439 BUG_ON(!buf_node->has_async_transaction);
3440 BUG_ON(buf_node->proc != proc);
3441 w = binder_dequeue_work_head_ilocked(
3442 &buf_node->async_todo);
3444 buf_node->has_async_transaction = 0;
3446 binder_enqueue_work_ilocked(
3448 binder_wakeup_proc_ilocked(proc);
3450 binder_node_inner_unlock(buf_node);
3452 trace_binder_transaction_buffer_release(buffer);
3453 binder_transaction_buffer_release(proc, buffer, NULL);
3454 binder_alloc_free_buf(&proc->alloc, buffer);
3458 case BC_TRANSACTION_SG:
3460 struct binder_transaction_data_sg tr;
3462 if (copy_from_user(&tr, ptr, sizeof(tr)))
3465 binder_transaction(proc, thread, &tr.transaction_data,
3466 cmd == BC_REPLY_SG, tr.buffers_size);
3469 case BC_TRANSACTION:
3471 struct binder_transaction_data tr;
3473 if (copy_from_user(&tr, ptr, sizeof(tr)))
3476 binder_transaction(proc, thread, &tr,
3477 cmd == BC_REPLY, 0);
3481 case BC_REGISTER_LOOPER:
3482 binder_debug(BINDER_DEBUG_THREADS,
3483 "%d:%d BC_REGISTER_LOOPER\n",
3484 proc->pid, thread->pid);
3485 binder_inner_proc_lock(proc);
3486 if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3487 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3488 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3489 proc->pid, thread->pid);
3490 } else if (proc->requested_threads == 0) {
3491 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3492 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3493 proc->pid, thread->pid);
3495 proc->requested_threads--;
3496 proc->requested_threads_started++;
3498 thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3499 binder_inner_proc_unlock(proc);
3501 case BC_ENTER_LOOPER:
3502 binder_debug(BINDER_DEBUG_THREADS,
3503 "%d:%d BC_ENTER_LOOPER\n",
3504 proc->pid, thread->pid);
3505 if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3506 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3507 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3508 proc->pid, thread->pid);
3510 thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3512 case BC_EXIT_LOOPER:
3513 binder_debug(BINDER_DEBUG_THREADS,
3514 "%d:%d BC_EXIT_LOOPER\n",
3515 proc->pid, thread->pid);
3516 thread->looper |= BINDER_LOOPER_STATE_EXITED;
3519 case BC_REQUEST_DEATH_NOTIFICATION:
3520 case BC_CLEAR_DEATH_NOTIFICATION: {
3522 binder_uintptr_t cookie;
3523 struct binder_ref *ref;
3524 struct binder_ref_death *death = NULL;
3526 if (get_user(target, (uint32_t __user *)ptr))
3528 ptr += sizeof(uint32_t);
3529 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3531 ptr += sizeof(binder_uintptr_t);
3532 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
			/*
			 * Allocate memory for death notification
			 * before taking lock
			 */
3537 death = kzalloc(sizeof(*death), GFP_KERNEL);
3538 if (death == NULL) {
3539 WARN_ON(thread->return_error.cmd !=
3541 thread->return_error.cmd = BR_ERROR;
3542 binder_enqueue_work(
3544 &thread->return_error.work,
3547 BINDER_DEBUG_FAILED_TRANSACTION,
3548 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3549 proc->pid, thread->pid);
3553 binder_proc_lock(proc);
3554 ref = binder_get_ref_olocked(proc, target, false);
3556 binder_user_error("%d:%d %s invalid ref %d\n",
3557 proc->pid, thread->pid,
3558 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3559 "BC_REQUEST_DEATH_NOTIFICATION" :
3560 "BC_CLEAR_DEATH_NOTIFICATION",
3562 binder_proc_unlock(proc);
3567 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3568 "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3569 proc->pid, thread->pid,
3570 cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3571 "BC_REQUEST_DEATH_NOTIFICATION" :
3572 "BC_CLEAR_DEATH_NOTIFICATION",
3573 (u64)cookie, ref->data.debug_id,
3574 ref->data.desc, ref->data.strong,
3575 ref->data.weak, ref->node->debug_id);
3577 binder_node_lock(ref->node);
3578 if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3580 binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3581 proc->pid, thread->pid);
3582 binder_node_unlock(ref->node);
3583 binder_proc_unlock(proc);
3587 binder_stats_created(BINDER_STAT_DEATH);
3588 INIT_LIST_HEAD(&death->work.entry);
3589 death->cookie = cookie;
3591 if (ref->node->proc == NULL) {
3592 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3594 binder_inner_proc_lock(proc);
3595 binder_enqueue_work_ilocked(
3596 &ref->death->work, &proc->todo);
3597 binder_wakeup_proc_ilocked(proc);
3598 binder_inner_proc_unlock(proc);
3601 if (ref->death == NULL) {
3602 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3603 proc->pid, thread->pid);
3604 binder_node_unlock(ref->node);
3605 binder_proc_unlock(proc);
3609 if (death->cookie != cookie) {
3610 binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3611 proc->pid, thread->pid,
3614 binder_node_unlock(ref->node);
3615 binder_proc_unlock(proc);
3619 binder_inner_proc_lock(proc);
3620 if (list_empty(&death->work.entry)) {
3621 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3622 if (thread->looper &
3623 (BINDER_LOOPER_STATE_REGISTERED |
3624 BINDER_LOOPER_STATE_ENTERED))
3625 binder_enqueue_work_ilocked(
3629 binder_enqueue_work_ilocked(
3632 binder_wakeup_proc_ilocked(
3636 BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3637 death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3639 binder_inner_proc_unlock(proc);
3641 binder_node_unlock(ref->node);
3642 binder_proc_unlock(proc);
3644 case BC_DEAD_BINDER_DONE: {
3645 struct binder_work *w;
3646 binder_uintptr_t cookie;
3647 struct binder_ref_death *death = NULL;
3649 if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3652 ptr += sizeof(cookie);
3653 binder_inner_proc_lock(proc);
3654 list_for_each_entry(w, &proc->delivered_death,
3656 struct binder_ref_death *tmp_death =
3658 struct binder_ref_death,
3661 if (tmp_death->cookie == cookie) {
3666 binder_debug(BINDER_DEBUG_DEAD_BINDER,
3667 "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3668 proc->pid, thread->pid, (u64)cookie,
3670 if (death == NULL) {
3671 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3672 proc->pid, thread->pid, (u64)cookie);
3673 binder_inner_proc_unlock(proc);
3676 binder_dequeue_work_ilocked(&death->work);
3677 if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3678 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3679 if (thread->looper &
3680 (BINDER_LOOPER_STATE_REGISTERED |
3681 BINDER_LOOPER_STATE_ENTERED))
3682 binder_enqueue_work_ilocked(
3683 &death->work, &thread->todo);
3685 binder_enqueue_work_ilocked(
3688 binder_wakeup_proc_ilocked(proc);
3691 binder_inner_proc_unlock(proc);
3695 pr_err("%d:%d unknown command %d\n",
3696 proc->pid, thread->pid, cmd);
		*consumed = ptr - buffer;
	}
	return 0;
}
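/*
 * Illustrative sketch (compiled out): the framing the loop above
 * consumes. The write buffer is a sequence of 32-bit BC_* codes, each
 * followed by a command-specific payload; *consumed reports back how
 * far the driver got. The payload_size callback is hypothetical and
 * stands in for the per-command sizes. Example-only.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t example_parse(const uint8_t *buf, size_t size,
			    void (*on_cmd)(uint32_t cmd, const void *payload),
			    size_t (*payload_size)(uint32_t cmd))
{
	size_t pos = 0;

	while (pos + sizeof(uint32_t) <= size) {
		uint32_t cmd;

		memcpy(&cmd, buf + pos, sizeof(cmd));
		pos += sizeof(cmd);
		on_cmd(cmd, buf + pos);
		/* e.g. sizeof(struct binder_transaction_data) for BC_TRANSACTION */
		pos += payload_size(cmd);
	}
	return pos;	/* what the driver reports via *consumed */
}
#endif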
3704 static void binder_stat_br(struct binder_proc *proc,
3705 struct binder_thread *thread, uint32_t cmd)
3707 trace_binder_return(cmd);
3708 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3709 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3710 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3711 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3715 static int binder_put_node_cmd(struct binder_proc *proc,
3716 struct binder_thread *thread,
3718 binder_uintptr_t node_ptr,
3719 binder_uintptr_t node_cookie,
3721 uint32_t cmd, const char *cmd_name)
3723 void __user *ptr = *ptrp;
3725 if (put_user(cmd, (uint32_t __user *)ptr))
3727 ptr += sizeof(uint32_t);
3729 if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3731 ptr += sizeof(binder_uintptr_t);
3733 if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3735 ptr += sizeof(binder_uintptr_t);
3737 binder_stat_br(proc, thread, cmd);
3738 binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3739 proc->pid, thread->pid, cmd_name, node_debug_id,
3740 (u64)node_ptr, (u64)node_cookie);
3746 static int binder_wait_for_work(struct binder_thread *thread,
3750 struct binder_proc *proc = thread->proc;
3753 freezer_do_not_count();
3754 binder_inner_proc_lock(proc);
3756 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3757 if (binder_has_work_ilocked(thread, do_proc_work))
3760 list_add(&thread->waiting_thread_node,
3761 &proc->waiting_threads);
3762 binder_inner_proc_unlock(proc);
3764 binder_inner_proc_lock(proc);
3765 list_del_init(&thread->waiting_thread_node);
3766 if (signal_pending(current)) {
3771 finish_wait(&thread->wait, &wait);
	binder_inner_proc_unlock(proc);
	freezer_count();

	return ret;
}
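/*
 * Illustrative sketch (compiled out): the canonical wait-loop shape used
 * by binder_wait_for_work() above, stripped of the binder-specific
 * bookkeeping. prepare_to_wait() publishes the waiter before the
 * condition is re-checked, closing the lost-wakeup window; a pending
 * signal breaks out with -ERESTARTSYS. Kernel-style pattern sketch only,
 * not self-contained userspace code.
 */
#if 0
static int example_wait(wait_queue_head_t *wq, bool (*cond)(void))
{
	DEFINE_WAIT(wait);
	int ret = 0;

	for (;;) {
		prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
		if (cond())
			break;			/* work arrived */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;	/* interrupted */
			break;
		}
		schedule();			/* sleep until woken */
	}
	finish_wait(wq, &wait);
	return ret;
}
#endif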
3778 static int binder_thread_read(struct binder_proc *proc,
3779 struct binder_thread *thread,
3780 binder_uintptr_t binder_buffer, size_t size,
3781 binder_size_t *consumed, int non_block)
3783 void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3784 void __user *ptr = buffer + *consumed;
3785 void __user *end = buffer + size;
3788 int wait_for_proc_work;
3790 if (*consumed == 0) {
		if (put_user(BR_NOOP, (uint32_t __user *)ptr))
			return -EFAULT;
		ptr += sizeof(uint32_t);
	}

retry:
	binder_inner_proc_lock(proc);
3798 wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3799 binder_inner_proc_unlock(proc);
3801 thread->looper |= BINDER_LOOPER_STATE_WAITING;
3803 trace_binder_wait_for_work(wait_for_proc_work,
3804 !!thread->transaction_stack,
3805 !binder_worklist_empty(proc, &thread->todo));
3806 if (wait_for_proc_work) {
3807 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3808 BINDER_LOOPER_STATE_ENTERED))) {
3809 binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3810 proc->pid, thread->pid, thread->looper);
3811 wait_event_interruptible(binder_user_error_wait,
3812 binder_stop_on_user_error < 2);
3814 binder_set_nice(proc->default_priority);
3818 if (!binder_has_work(thread, wait_for_proc_work))
3821 ret = binder_wait_for_work(thread, wait_for_proc_work);
3824 thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3831 struct binder_transaction_data tr;
3832 struct binder_work *w = NULL;
3833 struct list_head *list = NULL;
3834 struct binder_transaction *t = NULL;
3835 struct binder_thread *t_from;
3837 binder_inner_proc_lock(proc);
3838 if (!binder_worklist_empty_ilocked(&thread->todo))
3839 list = &thread->todo;
		else if (!binder_worklist_empty_ilocked(&proc->todo) &&
			   wait_for_proc_work)
			list = &proc->todo;
		else {
			binder_inner_proc_unlock(proc);

			/* no data added */
			if (ptr - buffer == 4 && !thread->looper_need_return)
				goto retry;
			break;
		}

		if (end - ptr < sizeof(tr) + 4) {
			binder_inner_proc_unlock(proc);
			break;
		}
		w = binder_dequeue_work_head_ilocked(list);
3859 case BINDER_WORK_TRANSACTION: {
3860 binder_inner_proc_unlock(proc);
3861 t = container_of(w, struct binder_transaction, work);
3863 case BINDER_WORK_RETURN_ERROR: {
3864 struct binder_error *e = container_of(
3865 w, struct binder_error, work);
3867 WARN_ON(e->cmd == BR_OK);
3868 binder_inner_proc_unlock(proc);
3869 if (put_user(e->cmd, (uint32_t __user *)ptr))
3872 ptr += sizeof(uint32_t);
3874 binder_stat_br(proc, thread, e->cmd);
3876 case BINDER_WORK_TRANSACTION_COMPLETE: {
3877 binder_inner_proc_unlock(proc);
3878 cmd = BR_TRANSACTION_COMPLETE;
3879 if (put_user(cmd, (uint32_t __user *)ptr))
3881 ptr += sizeof(uint32_t);
3883 binder_stat_br(proc, thread, cmd);
3884 binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
3885 "%d:%d BR_TRANSACTION_COMPLETE\n",
3886 proc->pid, thread->pid);
3888 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3890 case BINDER_WORK_NODE: {
3891 struct binder_node *node = container_of(w, struct binder_node, work);
3893 binder_uintptr_t node_ptr = node->ptr;
3894 binder_uintptr_t node_cookie = node->cookie;
3895 int node_debug_id = node->debug_id;
3898 void __user *orig_ptr = ptr;
3900 BUG_ON(proc != node->proc);
3901 strong = node->internal_strong_refs ||
3902 node->local_strong_refs;
3903 weak = !hlist_empty(&node->refs) ||
3904 node->local_weak_refs ||
3905 node->tmp_refs || strong;
3906 has_strong_ref = node->has_strong_ref;
3907 has_weak_ref = node->has_weak_ref;
3909 if (weak && !has_weak_ref) {
3910 node->has_weak_ref = 1;
3911 node->pending_weak_ref = 1;
3912 node->local_weak_refs++;
3914 if (strong && !has_strong_ref) {
3915 node->has_strong_ref = 1;
3916 node->pending_strong_ref = 1;
3917 node->local_strong_refs++;
3919 if (!strong && has_strong_ref)
3920 node->has_strong_ref = 0;
3921 if (!weak && has_weak_ref)
3922 node->has_weak_ref = 0;
3923 if (!weak && !strong) {
3924 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3925 "%d:%d node %d u%016llx c%016llx deleted\n",
3926 proc->pid, thread->pid,
3930 rb_erase(&node->rb_node, &proc->nodes);
3931 binder_inner_proc_unlock(proc);
3932 binder_node_lock(node);
3934 * Acquire the node lock before freeing the
3935 * node to serialize with other threads that
3936 * may have been holding the node lock while
3937 * decrementing this node (avoids race where
3938 * this thread frees while the other thread
3939 * is unlocking the node after the final
3942 binder_node_unlock(node);
3943 binder_free_node(node);
3945 binder_inner_proc_unlock(proc);
3947 if (weak && !has_weak_ref)
3948 ret = binder_put_node_cmd(
3949 proc, thread, &ptr, node_ptr,
3950 node_cookie, node_debug_id,
3951 BR_INCREFS, "BR_INCREFS");
3952 if (!ret && strong && !has_strong_ref)
3953 ret = binder_put_node_cmd(
3954 proc, thread, &ptr, node_ptr,
3955 node_cookie, node_debug_id,
3956 BR_ACQUIRE, "BR_ACQUIRE");
3957 if (!ret && !strong && has_strong_ref)
3958 ret = binder_put_node_cmd(
3959 proc, thread, &ptr, node_ptr,
3960 node_cookie, node_debug_id,
3961 BR_RELEASE, "BR_RELEASE");
3962 if (!ret && !weak && has_weak_ref)
3963 ret = binder_put_node_cmd(
3964 proc, thread, &ptr, node_ptr,
3965 node_cookie, node_debug_id,
3966 BR_DECREFS, "BR_DECREFS");
3967 if (orig_ptr == ptr)
3968 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
3969 "%d:%d node %d u%016llx c%016llx state unchanged\n",
3970 proc->pid, thread->pid,
3977 case BINDER_WORK_DEAD_BINDER:
3978 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
3979 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
3980 struct binder_ref_death *death;
3982 binder_uintptr_t cookie;
3984 death = container_of(w, struct binder_ref_death, work);
3985 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
3986 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
3988 cmd = BR_DEAD_BINDER;
3989 cookie = death->cookie;
3991 binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3992 "%d:%d %s %016llx\n",
3993 proc->pid, thread->pid,
3994 cmd == BR_DEAD_BINDER ?
3996 "BR_CLEAR_DEATH_NOTIFICATION_DONE",
3998 if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
3999 binder_inner_proc_unlock(proc);
4001 binder_stats_deleted(BINDER_STAT_DEATH);
4003 binder_enqueue_work_ilocked(
4004 w, &proc->delivered_death);
4005 binder_inner_proc_unlock(proc);
4007 if (put_user(cmd, (uint32_t __user *)ptr))
4009 ptr += sizeof(uint32_t);
4010 if (put_user(cookie,
4011 (binder_uintptr_t __user *)ptr))
4013 ptr += sizeof(binder_uintptr_t);
4014 binder_stat_br(proc, thread, cmd);
4015 if (cmd == BR_DEAD_BINDER)
4016 goto done; /* DEAD_BINDER notifications can cause transactions */
4023 BUG_ON(t->buffer == NULL);
4024 if (t->buffer->target_node) {
4025 struct binder_node *target_node = t->buffer->target_node;
4027 tr.target.ptr = target_node->ptr;
4028 tr.cookie = target_node->cookie;
4029 t->saved_priority = task_nice(current);
4030 if (t->priority < target_node->min_priority &&
4031 !(t->flags & TF_ONE_WAY))
4032 binder_set_nice(t->priority);
4033 else if (!(t->flags & TF_ONE_WAY) ||
4034 t->saved_priority > target_node->min_priority)
4035 binder_set_nice(target_node->min_priority);
4036 cmd = BR_TRANSACTION;
4043 tr.flags = t->flags;
4044 tr.sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4046 t_from = binder_get_txn_from(t);
4048 struct task_struct *sender = t_from->proc->tsk;
4050 tr.sender_pid = task_tgid_nr_ns(sender,
4051 task_active_pid_ns(current));
4056 tr.data_size = t->buffer->data_size;
4057 tr.offsets_size = t->buffer->offsets_size;
4058 tr.data.ptr.buffer = (binder_uintptr_t)
4059 ((uintptr_t)t->buffer->data +
4060 binder_alloc_get_user_buffer_offset(&proc->alloc));
4061 tr.data.ptr.offsets = tr.data.ptr.buffer +
4062 ALIGN(t->buffer->data_size,
4065 if (put_user(cmd, (uint32_t __user *)ptr)) {
4067 binder_thread_dec_tmpref(t_from);
4069 binder_cleanup_transaction(t, "put_user failed",
4074 ptr += sizeof(uint32_t);
4075 if (copy_to_user(ptr, &tr, sizeof(tr))) {
4077 binder_thread_dec_tmpref(t_from);
4079 binder_cleanup_transaction(t, "copy_to_user failed",
4086 trace_binder_transaction_received(t);
4087 binder_stat_br(proc, thread, cmd);
4088 binder_debug(BINDER_DEBUG_TRANSACTION,
4089 "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4090 proc->pid, thread->pid,
4091 (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4093 t->debug_id, t_from ? t_from->proc->pid : 0,
4094 t_from ? t_from->pid : 0, cmd,
4095 t->buffer->data_size, t->buffer->offsets_size,
4096 (u64)tr.data.ptr.buffer, (u64)tr.data.ptr.offsets);
4099 binder_thread_dec_tmpref(t_from);
4100 t->buffer->allow_user_free = 1;
4101 if (cmd == BR_TRANSACTION && !(t->flags & TF_ONE_WAY)) {
4102 binder_inner_proc_lock(thread->proc);
4103 t->to_parent = thread->transaction_stack;
4104 t->to_thread = thread;
4105 thread->transaction_stack = t;
4106 binder_inner_proc_unlock(thread->proc);
4108 binder_free_transaction(t);
4115 *consumed = ptr - buffer;
4116 binder_inner_proc_lock(proc);
4117 if (proc->requested_threads == 0 &&
4118 list_empty(&thread->proc->waiting_threads) &&
4119 proc->requested_threads_started < proc->max_threads &&
	    (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
			       BINDER_LOOPER_STATE_ENTERED))
	    /* the user-space code fails to spawn a new thread
	     * if we leave this out */) {
4123 proc->requested_threads++;
4124 binder_inner_proc_unlock(proc);
4125 binder_debug(BINDER_DEBUG_THREADS,
4126 "%d:%d BR_SPAWN_LOOPER\n",
4127 proc->pid, thread->pid);
4128 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4130 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
	} else
		binder_inner_proc_unlock(proc);
	return 0;
}
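/*
 * Illustrative sketch (compiled out): the user-space side of the
 * BR_SPAWN_LOOPER handshake above. A process caps its pool with
 * BINDER_SET_MAX_THREADS, and each spawned thread announces itself with
 * BC_REGISTER_LOOPER before reading work. Example-only; spawn_thread()
 * is a hypothetical helper.
 */
#if 0
#include <linux/android/binder.h>
#include <stdint.h>

static void example_handle_return(int binder_fd, uint32_t br,
				  void (*spawn_thread)(int))
{
	switch (br) {
	case BR_SPAWN_LOOPER:
		/* the driver asked for one more looper: create it... */
		spawn_thread(binder_fd);
		/* ...the new thread then writes BC_REGISTER_LOOPER */
		break;
	case BR_NOOP:
		/* always first in a freshly filled read buffer */
		break;
	default:
		break;
	}
}
#endif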
4136 static void binder_release_work(struct binder_proc *proc,
4137 struct list_head *list)
4139 struct binder_work *w;
4142 w = binder_dequeue_work_head(proc, list);
4147 case BINDER_WORK_TRANSACTION: {
4148 struct binder_transaction *t;
4150 t = container_of(w, struct binder_transaction, work);
4152 binder_cleanup_transaction(t, "process died.",
4155 case BINDER_WORK_RETURN_ERROR: {
4156 struct binder_error *e = container_of(
4157 w, struct binder_error, work);
4159 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4160 "undelivered TRANSACTION_ERROR: %u\n",
4163 case BINDER_WORK_TRANSACTION_COMPLETE: {
4164 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4165 "undelivered TRANSACTION_COMPLETE\n");
4167 binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4169 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4170 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4171 struct binder_ref_death *death;
4173 death = container_of(w, struct binder_ref_death, work);
4174 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4175 "undelivered death notification, %016llx\n",
4176 (u64)death->cookie);
4178 binder_stats_deleted(BINDER_STAT_DEATH);
4181 pr_err("unexpected work type, %d, not freed\n",
static struct binder_thread *binder_get_thread_ilocked(
		struct binder_proc *proc, struct binder_thread *new_thread)
{
	struct binder_thread *thread = NULL;
	struct rb_node *parent = NULL;
	struct rb_node **p = &proc->threads.rb_node;

	while (*p) {
		parent = *p;
		thread = rb_entry(parent, struct binder_thread, rb_node);

		if (current->pid < thread->pid)
			p = &(*p)->rb_left;
		else if (current->pid > thread->pid)
			p = &(*p)->rb_right;
		else
			return thread;
	}
	if (!new_thread)
		return NULL;
	thread = new_thread;
	binder_stats_created(BINDER_STAT_THREAD);
	thread->proc = proc;
	thread->pid = current->pid;
	atomic_set(&thread->tmp_ref, 0);
	init_waitqueue_head(&thread->wait);
	INIT_LIST_HEAD(&thread->todo);
	rb_link_node(&thread->rb_node, parent, p);
	rb_insert_color(&thread->rb_node, &proc->threads);
	thread->looper_need_return = true;
	thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->return_error.cmd = BR_OK;
	thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
	thread->reply_error.cmd = BR_OK;
	INIT_LIST_HEAD(&new_thread->waiting_thread_node);
	return thread;
}
static struct binder_thread *binder_get_thread(struct binder_proc *proc)
{
	struct binder_thread *thread;
	struct binder_thread *new_thread;

	binder_inner_proc_lock(proc);
	thread = binder_get_thread_ilocked(proc, NULL);
	binder_inner_proc_unlock(proc);
	if (!thread) {
		new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
		if (new_thread == NULL)
			return NULL;
		binder_inner_proc_lock(proc);
		thread = binder_get_thread_ilocked(proc, new_thread);
		binder_inner_proc_unlock(proc);
		if (thread != new_thread)
			kfree(new_thread);
	}
	return thread;
}
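
/*
 * Design note (added commentary): binder_get_thread() above allocates
 * with GFP_KERNEL outside the inner lock because proc->inner_lock is a
 * spinlock and kzalloc() may sleep. The lookup is redone under the
 * lock, and the unused allocation is freed if another thread raced in
 * and inserted the entry first.
 */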
static void binder_free_proc(struct binder_proc *proc)
{
	BUG_ON(!list_empty(&proc->todo));
	BUG_ON(!list_empty(&proc->delivered_death));
	binder_alloc_deferred_release(&proc->alloc);
	put_task_struct(proc->tsk);
	binder_stats_deleted(BINDER_STAT_PROC);
	kfree(proc);
}
static void binder_free_thread(struct binder_thread *thread)
{
	BUG_ON(!list_empty(&thread->todo));
	binder_stats_deleted(BINDER_STAT_THREAD);
	binder_proc_dec_tmpref(thread->proc);
	kfree(thread);
}
static int binder_thread_release(struct binder_proc *proc,
				 struct binder_thread *thread)
{
	struct binder_transaction *t;
	struct binder_transaction *send_reply = NULL;
	int active_transactions = 0;
	struct binder_transaction *last_t = NULL;

	binder_inner_proc_lock(thread->proc);
	/*
	 * take a ref on the proc so it survives
	 * after we remove this thread from proc->threads.
	 * The corresponding dec is when we actually
	 * free the thread in binder_free_thread()
	 */
	proc->tmp_ref++;
	/*
	 * take a ref on this thread to ensure it
	 * survives while we are releasing it
	 */
	atomic_inc(&thread->tmp_ref);
	rb_erase(&thread->rb_node, &proc->threads);
	t = thread->transaction_stack;
	if (t) {
		spin_lock(&t->lock);
		if (t->to_thread == thread)
			send_reply = t;
	}
	thread->is_dead = true;

	while (t) {
		last_t = t;
		active_transactions++;
		binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
			     "release %d:%d transaction %d %s, still active\n",
			      proc->pid, thread->pid,
			     t->debug_id,
			     (t->to_thread == thread) ? "in" : "out");

		if (t->to_thread == thread) {
			t->to_proc = NULL;
			t->to_thread = NULL;
			if (t->buffer) {
				t->buffer->transaction = NULL;
				t->buffer = NULL;
			}
			t = t->to_parent;
		} else if (t->from == thread) {
			t->from = NULL;
			t = t->from_parent;
		} else
			BUG();
		spin_unlock(&last_t->lock);
		if (t)
			spin_lock(&t->lock);
	}

	/*
	 * If this thread used poll, make sure we remove the waitqueue
	 * from any epoll data structures holding it with POLLFREE.
	 * waitqueue_active() is safe to use here because we're holding
	 * the inner lock.
	 */
	if ((thread->looper & BINDER_LOOPER_STATE_POLL) &&
	    waitqueue_active(&thread->wait)) {
		wake_up_poll(&thread->wait, POLLHUP | POLLFREE);
	}

	binder_inner_proc_unlock(thread->proc);

	/*
	 * This is needed to avoid races between wake_up_poll() above and
	 * ep_remove_waitqueue() called for other reasons (eg the epoll file
	 * descriptor being closed); ep_remove_waitqueue() holds an RCU read
	 * lock, so we can be sure it's done after calling synchronize_rcu().
	 */
	if (thread->looper & BINDER_LOOPER_STATE_POLL)
		synchronize_rcu();

	if (send_reply)
		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
	binder_release_work(proc, &thread->todo);
	binder_thread_dec_tmpref(thread);
	return active_transactions;
}
static unsigned int binder_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread = NULL;
	bool wait_for_proc_work;

	thread = binder_get_thread(proc);
	if (!thread)
		return POLLERR;

	binder_inner_proc_lock(thread->proc);
	thread->looper |= BINDER_LOOPER_STATE_POLL;
	wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);

	binder_inner_proc_unlock(thread->proc);

	poll_wait(filp, &thread->wait, wait);

	if (binder_has_work(thread, wait_for_proc_work))
		return POLLIN;

	return 0;
}
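
/*
 * Illustrative sketch (added commentary, not part of the driver): a
 * user-space thread can multiplex binder with other fds via poll(); a
 * minimal loop, with a hypothetical handle_binder_input() helper:
 *
 *	struct pollfd pfd = { .fd = binder_fd, .events = POLLIN };
 *
 *	for (;;) {
 *		if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *			handle_binder_input(binder_fd);
 *	}
 */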
static int binder_ioctl_write_read(struct file *filp,
				unsigned int cmd, unsigned long arg,
				struct binder_thread *thread)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;
	struct binder_write_read bwr;

	if (size != sizeof(struct binder_write_read)) {
		ret = -EINVAL;
		goto out;
	}
	if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d write %lld at %016llx, read %lld at %016llx\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_size, (u64)bwr.write_buffer,
		     (u64)bwr.read_size, (u64)bwr.read_buffer);

	if (bwr.write_size > 0) {
		ret = binder_thread_write(proc, thread,
					  bwr.write_buffer,
					  bwr.write_size,
					  &bwr.write_consumed);
		trace_binder_write_done(ret);
		if (ret < 0) {
			bwr.read_consumed = 0;
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	if (bwr.read_size > 0) {
		ret = binder_thread_read(proc, thread, bwr.read_buffer,
					 bwr.read_size,
					 &bwr.read_consumed,
					 filp->f_flags & O_NONBLOCK);
		trace_binder_read_done(ret);
		binder_inner_proc_lock(proc);
		if (!binder_worklist_empty_ilocked(&proc->todo))
			binder_wakeup_proc_ilocked(proc);
		binder_inner_proc_unlock(proc);
		if (ret < 0) {
			if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
				ret = -EFAULT;
			goto out;
		}
	}
	binder_debug(BINDER_DEBUG_READ_WRITE,
		     "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
		     proc->pid, thread->pid,
		     (u64)bwr.write_consumed, (u64)bwr.write_size,
		     (u64)bwr.read_consumed, (u64)bwr.read_size);
	if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
		ret = -EFAULT;
		goto out;
	}
out:
	return ret;
}
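
/*
 * Illustrative sketch (added commentary, not part of the driver): user
 * space drives this entry point through the BINDER_WRITE_READ ioctl. A
 * minimal read-only call, with a hypothetical binder_fd, looks roughly
 * like:
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t readbuf[32];
 *
 *	bwr.read_size = sizeof(readbuf);
 *	bwr.read_consumed = 0;
 *	bwr.read_buffer = (binder_uintptr_t)readbuf;
 *	if (ioctl(binder_fd, BINDER_WRITE_READ, &bwr) >= 0)
 *		parse_returns(readbuf, bwr.read_consumed);
 *
 * parse_returns() stands in for a loop that walks the BR_* commands
 * the driver placed in the read buffer.
 */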
static int binder_ioctl_set_ctx_mgr(struct file *filp)
{
	int ret = 0;
	struct binder_proc *proc = filp->private_data;
	struct binder_context *context = proc->context;
	struct binder_node *new_node;
	kuid_t curr_euid = current_euid();

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node) {
		pr_err("BINDER_SET_CONTEXT_MGR already set\n");
		ret = -EBUSY;
		goto out;
	}
	ret = security_binder_set_context_mgr(proc->tsk);
	if (ret < 0)
		goto out;
	if (uid_valid(context->binder_context_mgr_uid)) {
		if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
			pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
			       from_kuid(&init_user_ns, curr_euid),
			       from_kuid(&init_user_ns,
					 context->binder_context_mgr_uid));
			ret = -EPERM;
			goto out;
		}
	} else {
		context->binder_context_mgr_uid = curr_euid;
	}
	new_node = binder_new_node(proc, NULL);
	if (!new_node) {
		ret = -ENOMEM;
		goto out;
	}
	binder_node_lock(new_node);
	new_node->local_weak_refs++;
	new_node->local_strong_refs++;
	new_node->has_strong_ref = 1;
	new_node->has_weak_ref = 1;
	context->binder_context_mgr_node = new_node;
	binder_node_unlock(new_node);
	binder_put_node(new_node);
out:
	mutex_unlock(&context->context_mgr_node_lock);
	return ret;
}
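
/*
 * Illustrative sketch (added commentary, not part of the driver): the
 * context manager (servicemanager on Android) claims its role once at
 * startup, roughly:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *	if (ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		// a manager is already registered, or the security /
 *		// uid checks above rejected the caller
 *		exit(1);
 */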
static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
				struct binder_node_debug_info *info)
{
	struct rb_node *n;
	binder_uintptr_t ptr = info->ptr;

	memset(info, 0, sizeof(*info));

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		if (node->ptr > ptr) {
			info->ptr = node->ptr;
			info->cookie = node->cookie;
			info->has_strong_ref = node->has_strong_ref;
			info->has_weak_ref = node->has_weak_ref;
			break;
		}
	}
	binder_inner_proc_unlock(proc);

	return 0;
}
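
/*
 * Illustrative sketch (added commentary, not part of the driver): each
 * call returns the first node with ptr strictly greater than the one
 * passed in, so user space can enumerate all nodes of a process by
 * starting at 0 and feeding the returned ptr back in until it reads
 * back as 0:
 *
 *	struct binder_node_debug_info info = {0};
 *
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *		// consume info.ptr / info.cookie / ref flags here
 *	} while (info.ptr != 0);
 */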
static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	struct binder_thread *thread;
	unsigned int size = _IOC_SIZE(cmd);
	void __user *ubuf = (void __user *)arg;

	/*pr_info("binder_ioctl: %d:%d %x %lx\n",
			proc->pid, current->pid, cmd, arg);*/

	binder_selftest_alloc(&proc->alloc);

	trace_binder_ioctl(cmd, arg);

	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret)
		goto err_unlocked;

	thread = binder_get_thread(proc);
	if (thread == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	switch (cmd) {
	case BINDER_WRITE_READ:
		ret = binder_ioctl_write_read(filp, cmd, arg, thread);
		if (ret)
			goto err;
		break;
	case BINDER_SET_MAX_THREADS: {
		int max_threads;

		if (copy_from_user(&max_threads, ubuf,
				   sizeof(max_threads))) {
			ret = -EINVAL;
			goto err;
		}
		binder_inner_proc_lock(proc);
		proc->max_threads = max_threads;
		binder_inner_proc_unlock(proc);
		break;
	}
	case BINDER_SET_CONTEXT_MGR:
		ret = binder_ioctl_set_ctx_mgr(filp);
		if (ret)
			goto err;
		break;
	case BINDER_THREAD_EXIT:
		binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
			     proc->pid, thread->pid);
		binder_thread_release(proc, thread);
		thread = NULL;
		break;
	case BINDER_VERSION: {
		struct binder_version __user *ver = ubuf;

		if (size != sizeof(struct binder_version)) {
			ret = -EINVAL;
			goto err;
		}
		if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
			     &ver->protocol_version)) {
			ret = -EINVAL;
			goto err;
		}
		break;
	}
	case BINDER_GET_NODE_DEBUG_INFO: {
		struct binder_node_debug_info info;

		if (copy_from_user(&info, ubuf, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}

		ret = binder_ioctl_get_node_debug_info(proc, &info);
		if (ret < 0)
			goto err;

		if (copy_to_user(ubuf, &info, sizeof(info))) {
			ret = -EFAULT;
			goto err;
		}
		break;
	}
	default:
		ret = -EINVAL;
		goto err;
	}
	ret = 0;
err:
	if (thread)
		thread->looper_need_return = false;
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
	if (ret && ret != -ERESTARTSYS)
		pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
err_unlocked:
	trace_binder_ioctl_done(ret);
	return ret;
}
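
/*
 * Illustrative sketch (added commentary, not part of the driver):
 * clients typically verify the protocol version right after opening
 * the device:
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		// mismatched kernel/user-space headers; bail out
 *		abort_binder_setup();
 *
 * abort_binder_setup() is a hypothetical error path.
 */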
static void binder_vma_open(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
}
static void binder_vma_close(struct vm_area_struct *vma)
{
	struct binder_proc *proc = vma->vm_private_data;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));
	binder_alloc_vma_close(&proc->alloc);
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
}
static int binder_vm_fault(struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct binder_vm_ops = {
	.open = binder_vma_open,
	.close = binder_vma_close,
	.fault = binder_vm_fault,
};
static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
{
	int ret;
	struct binder_proc *proc = filp->private_data;
	const char *failure_string;

	if (proc->tsk != current->group_leader)
		return -EINVAL;

	if ((vma->vm_end - vma->vm_start) > SZ_4M)
		vma->vm_end = vma->vm_start + SZ_4M;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
		     __func__, proc->pid, vma->vm_start, vma->vm_end,
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
		     (unsigned long)pgprot_val(vma->vm_page_prot));

	if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
		ret = -EPERM;
		failure_string = "bad vm_flags";
		goto err_bad_arg;
	}
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
	vma->vm_ops = &binder_vm_ops;
	vma->vm_private_data = proc;

	ret = binder_alloc_mmap_handler(&proc->alloc, vma);
	if (ret)
		return ret;
	mutex_lock(&proc->files_lock);
	proc->files = get_files_struct(current);
	mutex_unlock(&proc->files_lock);
	return 0;

err_bad_arg:
	pr_err("binder_mmap: %d %lx-%lx %s failed %d\n",
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
	return ret;
}
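
/*
 * Illustrative sketch (added commentary, not part of the driver): user
 * space maps the transaction buffer read-only right after open(); the
 * requested size is clamped to 4 MB above. The size here is
 * hypothetical:
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE, fd, 0);
 *
 * Writes go through BC_TRANSACTION; a PROT_WRITE mapping would set
 * VM_WRITE, trip FORBIDDEN_MMAP_FLAGS, and fail with -EPERM.
 */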
static int binder_open(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc;
	struct binder_device *binder_dev;

	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
		     current->group_leader->pid, current->pid);

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (proc == NULL)
		return -ENOMEM;
	spin_lock_init(&proc->inner_lock);
	spin_lock_init(&proc->outer_lock);
	get_task_struct(current->group_leader);
	proc->tsk = current->group_leader;
	mutex_init(&proc->files_lock);
	INIT_LIST_HEAD(&proc->todo);
	proc->default_priority = task_nice(current);
	binder_dev = container_of(filp->private_data, struct binder_device,
				  miscdev);
	proc->context = &binder_dev->context;
	binder_alloc_init(&proc->alloc);

	binder_stats_created(BINDER_STAT_PROC);
	proc->pid = current->group_leader->pid;
	INIT_LIST_HEAD(&proc->delivered_death);
	INIT_LIST_HEAD(&proc->waiting_threads);
	filp->private_data = proc;

	mutex_lock(&binder_procs_lock);
	hlist_add_head(&proc->proc_node, &binder_procs);
	mutex_unlock(&binder_procs_lock);

	if (binder_debugfs_dir_entry_proc) {
		char strbuf[11];

		snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
		/*
		 * proc debug entries are shared between contexts, so
		 * this will fail if the process tries to open the driver
		 * again with a different context. The printing code will
		 * anyway print all contexts that a given PID has, so this
		 * is not a problem.
		 */
		proc->debugfs_entry = debugfs_create_file(strbuf, S_IRUGO,
			binder_debugfs_dir_entry_proc,
			(void *)(unsigned long)proc->pid,
			&binder_proc_fops);
	}

	return 0;
}
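
/*
 * Illustrative sketch (added commentary, not part of the driver): a
 * process binds to a binder context simply by opening the
 * corresponding device node:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 * Each opening process gets its own binder_proc; its threads are then
 * registered lazily via binder_get_thread() on first use.
 */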
static int binder_flush(struct file *filp, fl_owner_t id)
{
	struct binder_proc *proc = filp->private_data;

	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);

	return 0;
}
static void binder_deferred_flush(struct binder_proc *proc)
{
	struct rb_node *n;
	int wake_count = 0;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
		struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);

		thread->looper_need_return = true;
		if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
			wake_up_interruptible(&thread->wait);
			wake_count++;
		}
	}
	binder_inner_proc_unlock(proc);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "binder_flush: %d woke %d threads\n", proc->pid,
		     wake_count);
}
static int binder_release(struct inode *nodp, struct file *filp)
{
	struct binder_proc *proc = filp->private_data;

	debugfs_remove(proc->debugfs_entry);
	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);

	return 0;
}
static int binder_node_release(struct binder_node *node, int refs)
{
	struct binder_ref *ref;
	int death = 0;
	struct binder_proc *proc = node->proc;

	binder_release_work(proc, &node->async_todo);

	binder_node_lock(node);
	binder_inner_proc_lock(proc);
	binder_dequeue_work_ilocked(&node->work);
	/*
	 * The caller must have taken a temporary ref on the node.
	 */
	BUG_ON(!node->tmp_refs);
	if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		binder_free_node(node);

		return refs;
	}

	node->proc = NULL;
	node->local_strong_refs = 0;
	node->local_weak_refs = 0;
	binder_inner_proc_unlock(proc);

	spin_lock(&binder_dead_nodes_lock);
	hlist_add_head(&node->dead_node, &binder_dead_nodes);
	spin_unlock(&binder_dead_nodes_lock);

	hlist_for_each_entry(ref, &node->refs, node_entry) {
		refs++;
		/*
		 * Need the node lock to synchronize
		 * with new notification requests and the
		 * inner lock to synchronize with queued
		 * death notifications.
		 */
		binder_inner_proc_lock(ref->proc);
		if (!ref->death) {
			binder_inner_proc_unlock(ref->proc);
			continue;
		}

		death++;

		BUG_ON(!list_empty(&ref->death->work.entry));
		ref->death->work.type = BINDER_WORK_DEAD_BINDER;
		binder_enqueue_work_ilocked(&ref->death->work,
					    &ref->proc->todo);
		binder_wakeup_proc_ilocked(ref->proc);
		binder_inner_proc_unlock(ref->proc);
	}

	binder_debug(BINDER_DEBUG_DEAD_BINDER,
		     "node %d now dead, refs %d, death %d\n",
		     node->debug_id, refs, death);

	binder_node_unlock(node);
	binder_put_node(node);

	return refs;
}
static void binder_deferred_release(struct binder_proc *proc)
{
	struct binder_context *context = proc->context;
	struct rb_node *n;
	int threads, nodes, incoming_refs, outgoing_refs, active_transactions;

	BUG_ON(proc->files);

	mutex_lock(&binder_procs_lock);
	hlist_del(&proc->proc_node);
	mutex_unlock(&binder_procs_lock);

	mutex_lock(&context->context_mgr_node_lock);
	if (context->binder_context_mgr_node &&
	    context->binder_context_mgr_node->proc == proc) {
		binder_debug(BINDER_DEBUG_DEAD_BINDER,
			     "%s: %d context_mgr_node gone\n",
			     __func__, proc->pid);
		context->binder_context_mgr_node = NULL;
	}
	mutex_unlock(&context->context_mgr_node_lock);
	binder_inner_proc_lock(proc);
	/*
	 * Make sure proc stays alive after we
	 * remove all the threads
	 */
	proc->tmp_ref++;

	proc->is_dead = true;
	threads = 0;
	active_transactions = 0;
	while ((n = rb_first(&proc->threads))) {
		struct binder_thread *thread;

		thread = rb_entry(n, struct binder_thread, rb_node);
		binder_inner_proc_unlock(proc);
		threads++;
		active_transactions += binder_thread_release(proc, thread);
		binder_inner_proc_lock(proc);
	}

	nodes = 0;
	incoming_refs = 0;
	while ((n = rb_first(&proc->nodes))) {
		struct binder_node *node;

		node = rb_entry(n, struct binder_node, rb_node);
		nodes++;
		/*
		 * take a temporary ref on the node before
		 * calling binder_node_release() which will either
		 * kfree() the node or call binder_put_node()
		 */
		binder_inc_node_tmpref_ilocked(node);
		rb_erase(&node->rb_node, &proc->nodes);
		binder_inner_proc_unlock(proc);
		incoming_refs = binder_node_release(node, incoming_refs);
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);

	outgoing_refs = 0;
	binder_proc_lock(proc);
	while ((n = rb_first(&proc->refs_by_desc))) {
		struct binder_ref *ref;

		ref = rb_entry(n, struct binder_ref, rb_node_desc);
		outgoing_refs++;
		binder_cleanup_ref_olocked(ref);
		binder_proc_unlock(proc);
		binder_free_ref(ref);
		binder_proc_lock(proc);
	}
	binder_proc_unlock(proc);

	binder_release_work(proc, &proc->todo);
	binder_release_work(proc, &proc->delivered_death);

	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
		     "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
		     __func__, proc->pid, threads, nodes, incoming_refs,
		     outgoing_refs, active_transactions);

	binder_proc_dec_tmpref(proc);
}
static void binder_deferred_func(struct work_struct *work)
{
	struct binder_proc *proc;
	struct files_struct *files;

	int defer;

	do {
		mutex_lock(&binder_deferred_lock);
		if (!hlist_empty(&binder_deferred_list)) {
			proc = hlist_entry(binder_deferred_list.first,
					struct binder_proc, deferred_work_node);
			hlist_del_init(&proc->deferred_work_node);
			defer = proc->deferred_work;
			proc->deferred_work = 0;
		} else {
			proc = NULL;
			defer = 0;
		}
		mutex_unlock(&binder_deferred_lock);

		files = NULL;
		if (defer & BINDER_DEFERRED_PUT_FILES) {
			mutex_lock(&proc->files_lock);
			files = proc->files;
			if (files)
				proc->files = NULL;
			mutex_unlock(&proc->files_lock);
		}

		if (defer & BINDER_DEFERRED_FLUSH)
			binder_deferred_flush(proc);

		if (defer & BINDER_DEFERRED_RELEASE)
			binder_deferred_release(proc); /* frees proc */

		if (files)
			put_files_struct(files);
	} while (proc);
}
static DECLARE_WORK(binder_deferred_work, binder_deferred_func);

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
{
	mutex_lock(&binder_deferred_lock);
	proc->deferred_work |= defer;
	if (hlist_unhashed(&proc->deferred_work_node)) {
		hlist_add_head(&proc->deferred_work_node,
				&binder_deferred_list);
		schedule_work(&binder_deferred_work);
	}
	mutex_unlock(&binder_deferred_lock);
}
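
/*
 * Design note (added commentary): deferred work is coalesced. Multiple
 * binder_defer_work() calls for the same proc OR their flags into
 * proc->deferred_work and queue the proc at most once;
 * binder_deferred_func() then consumes all pending flags in one pass.
 */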
static void print_binder_transaction_ilocked(struct seq_file *m,
					     struct binder_proc *proc,
					     const char *prefix,
					     struct binder_transaction *t)
{
	struct binder_proc *to_proc;
	struct binder_buffer *buffer = t->buffer;

	spin_lock(&t->lock);
	to_proc = t->to_proc;
	seq_printf(m,
		   "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
		   prefix, t->debug_id, t,
		   t->from ? t->from->proc->pid : 0,
		   t->from ? t->from->pid : 0,
		   to_proc ? to_proc->pid : 0,
		   t->to_thread ? t->to_thread->pid : 0,
		   t->code, t->flags, t->priority, t->need_reply);
	spin_unlock(&t->lock);

	if (proc != to_proc) {
		/*
		 * Can only safely deref buffer if we are holding the
		 * correct proc inner lock for this node
		 */
		seq_puts(m, "\n");
		return;
	}

	if (buffer == NULL) {
		seq_puts(m, " buffer free\n");
		return;
	}
	if (buffer->target_node)
		seq_printf(m, " node %d", buffer->target_node->debug_id);
	seq_printf(m, " size %zd:%zd data %pK\n",
		   buffer->data_size, buffer->offsets_size,
		   buffer->data);
}
static void print_binder_work_ilocked(struct seq_file *m,
				     struct binder_proc *proc,
				     const char *prefix,
				     const char *transaction_prefix,
				     struct binder_work *w)
{
	struct binder_node *node;
	struct binder_transaction *t;

	switch (w->type) {
	case BINDER_WORK_TRANSACTION:
		t = container_of(w, struct binder_transaction, work);
		print_binder_transaction_ilocked(
				m, proc, transaction_prefix, t);
		break;
	case BINDER_WORK_RETURN_ERROR: {
		struct binder_error *e = container_of(
				w, struct binder_error, work);

		seq_printf(m, "%stransaction error: %u\n",
			   prefix, e->cmd);
	} break;
	case BINDER_WORK_TRANSACTION_COMPLETE:
		seq_printf(m, "%stransaction complete\n", prefix);
		break;
	case BINDER_WORK_NODE:
		node = container_of(w, struct binder_node, work);
		seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
			   prefix, node->debug_id,
			   (u64)node->ptr, (u64)node->cookie);
		break;
	case BINDER_WORK_DEAD_BINDER:
		seq_printf(m, "%shas dead binder\n", prefix);
		break;
	case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
		seq_printf(m, "%shas cleared dead binder\n", prefix);
		break;
	case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
		seq_printf(m, "%shas cleared death notification\n", prefix);
		break;
	default:
		seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
		break;
	}
}
static void print_binder_thread_ilocked(struct seq_file *m,
					struct binder_thread *thread,
					int print_always)
{
	struct binder_transaction *t;
	struct binder_work *w;
	size_t start_pos = m->count;
	size_t header_pos;

	seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
			thread->pid, thread->looper,
			thread->looper_need_return,
			atomic_read(&thread->tmp_ref));
	header_pos = m->count;
	t = thread->transaction_stack;
	while (t) {
		if (t->from == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    outgoing transaction", t);
			t = t->from_parent;
		} else if (t->to_thread == thread) {
			print_binder_transaction_ilocked(m, thread->proc,
					"    incoming transaction", t);
			t = t->to_parent;
		} else {
			print_binder_transaction_ilocked(m, thread->proc,
					"    bad transaction", t);
			t = NULL;
		}
	}
	list_for_each_entry(w, &thread->todo, entry) {
		print_binder_work_ilocked(m, thread->proc, "    ",
					  "    pending transaction", w);
	}
	if (!print_always && m->count == header_pos)
		m->count = start_pos;
}
static void print_binder_node_nilocked(struct seq_file *m,
				       struct binder_node *node)
{
	struct binder_ref *ref;
	struct binder_work *w;
	int count;

	count = 0;
	hlist_for_each_entry(ref, &node->refs, node_entry)
		count++;

	seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
		   node->debug_id, (u64)node->ptr, (u64)node->cookie,
		   node->has_strong_ref, node->has_weak_ref,
		   node->local_strong_refs, node->local_weak_refs,
		   node->internal_strong_refs, count, node->tmp_refs);
	if (count) {
		seq_puts(m, " proc");
		hlist_for_each_entry(ref, &node->refs, node_entry)
			seq_printf(m, " %d", ref->proc->pid);
	}
	seq_puts(m, "\n");
	if (node->proc) {
		list_for_each_entry(w, &node->async_todo, entry)
			print_binder_work_ilocked(m, node->proc, "    ",
					  "    pending async transaction", w);
	}
}
static void print_binder_ref_olocked(struct seq_file *m,
				     struct binder_ref *ref)
{
	binder_node_lock(ref->node);
	seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
		   ref->data.debug_id, ref->data.desc,
		   ref->node->proc ? "" : "dead ",
		   ref->node->debug_id, ref->data.strong,
		   ref->data.weak, ref->death);
	binder_node_unlock(ref->node);
}
static void print_binder_proc(struct seq_file *m,
			      struct binder_proc *proc, int print_all)
{
	struct binder_work *w;
	struct rb_node *n;
	size_t start_pos = m->count;
	size_t header_pos;
	struct binder_node *last_node = NULL;

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	header_pos = m->count;

	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
						rb_node), print_all);

	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
		struct binder_node *node = rb_entry(n, struct binder_node,
						    rb_node);
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the tree
		 * while we print it.
		 */
		binder_inc_node_tmpref_ilocked(node);
		/* Need to drop inner lock to take node lock */
		binder_inner_proc_unlock(proc);
		if (last_node)
			binder_put_node(last_node);
		binder_node_inner_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_inner_unlock(node);
		last_node = node;
		binder_inner_proc_lock(proc);
	}
	binder_inner_proc_unlock(proc);
	if (last_node)
		binder_put_node(last_node);

	if (print_all) {
		binder_proc_lock(proc);
		for (n = rb_first(&proc->refs_by_desc);
		     n != NULL;
		     n = rb_next(n))
			print_binder_ref_olocked(m, rb_entry(n,
							    struct binder_ref,
							    rb_node_desc));
		binder_proc_unlock(proc);
	}
	binder_alloc_print_allocated(m, &proc->alloc);
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry)
		print_binder_work_ilocked(m, proc, "  ",
					  "  pending transaction", w);
	list_for_each_entry(w, &proc->delivered_death, entry) {
		seq_puts(m, "  has delivered dead binder\n");
		break;
	}
	binder_inner_proc_unlock(proc);
	if (!print_all && m->count == header_pos)
		m->count = start_pos;
}
static const char * const binder_return_strings[] = {
	"BR_ERROR",
	"BR_OK",
	"BR_TRANSACTION",
	"BR_REPLY",
	"BR_ACQUIRE_RESULT",
	"BR_DEAD_REPLY",
	"BR_TRANSACTION_COMPLETE",
	"BR_INCREFS",
	"BR_ACQUIRE",
	"BR_RELEASE",
	"BR_DECREFS",
	"BR_ATTEMPT_ACQUIRE",
	"BR_NOOP",
	"BR_SPAWN_LOOPER",
	"BR_FINISHED",
	"BR_DEAD_BINDER",
	"BR_CLEAR_DEATH_NOTIFICATION_DONE",
	"BR_FAILED_REPLY"
};

static const char * const binder_command_strings[] = {
	"BC_TRANSACTION",
	"BC_REPLY",
	"BC_ACQUIRE_RESULT",
	"BC_FREE_BUFFER",
	"BC_INCREFS",
	"BC_ACQUIRE",
	"BC_RELEASE",
	"BC_DECREFS",
	"BC_INCREFS_DONE",
	"BC_ACQUIRE_DONE",
	"BC_ATTEMPT_ACQUIRE",
	"BC_REGISTER_LOOPER",
	"BC_ENTER_LOOPER",
	"BC_EXIT_LOOPER",
	"BC_REQUEST_DEATH_NOTIFICATION",
	"BC_CLEAR_DEATH_NOTIFICATION",
	"BC_DEAD_BINDER_DONE",
	"BC_TRANSACTION_SG",
	"BC_REPLY_SG",
};

static const char * const binder_objstat_strings[] = {
	"proc",
	"thread",
	"node",
	"ref",
	"death",
	"transaction",
	"transaction_complete"
};
static void print_binder_stats(struct seq_file *m, const char *prefix,
			       struct binder_stats *stats)
{
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
		     ARRAY_SIZE(binder_command_strings));
	for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
		int temp = atomic_read(&stats->bc[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_command_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
		     ARRAY_SIZE(binder_return_strings));
	for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
		int temp = atomic_read(&stats->br[i]);

		if (temp)
			seq_printf(m, "%s%s: %d\n", prefix,
				   binder_return_strings[i], temp);
	}

	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(binder_objstat_strings));
	BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
		     ARRAY_SIZE(stats->obj_deleted));
	for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
		int created = atomic_read(&stats->obj_created[i]);
		int deleted = atomic_read(&stats->obj_deleted[i]);

		if (created || deleted)
			seq_printf(m, "%s%s: active %d total %d\n",
				   prefix,
				   binder_objstat_strings[i],
				   created - deleted,
				   created);
	}
}
static void print_binder_proc_stats(struct seq_file *m,
				    struct binder_proc *proc)
{
	struct binder_work *w;
	struct binder_thread *thread;
	struct rb_node *n;
	int count, strong, weak, ready_threads;
	size_t free_async_space =
		binder_alloc_get_free_async_space(&proc->alloc);

	seq_printf(m, "proc %d\n", proc->pid);
	seq_printf(m, "context %s\n", proc->context->name);
	count = 0;
	ready_threads = 0;
	binder_inner_proc_lock(proc);
	for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
		count++;

	list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
		ready_threads++;

	seq_printf(m, "  threads: %d\n", count);
	seq_printf(m, "  requested threads: %d+%d/%d\n"
			"  ready threads %d\n"
			"  free async space %zd\n", proc->requested_threads,
			proc->requested_threads_started, proc->max_threads,
			ready_threads,
			free_async_space);
	count = 0;
	for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
		count++;
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  nodes: %d\n", count);
	count = 0;
	strong = 0;
	weak = 0;
	binder_proc_lock(proc);
	for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
		struct binder_ref *ref = rb_entry(n, struct binder_ref,
						  rb_node_desc);
		count++;
		strong += ref->data.strong;
		weak += ref->data.weak;
	}
	binder_proc_unlock(proc);
	seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);

	count = binder_alloc_get_allocated_count(&proc->alloc);
	seq_printf(m, "  buffers: %d\n", count);

	binder_alloc_print_pages(m, &proc->alloc);

	count = 0;
	binder_inner_proc_lock(proc);
	list_for_each_entry(w, &proc->todo, entry) {
		if (w->type == BINDER_WORK_TRANSACTION)
			count++;
	}
	binder_inner_proc_unlock(proc);
	seq_printf(m, "  pending transactions: %d\n", count);

	print_binder_stats(m, "  ", &proc->stats);
}
static int binder_state_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;
	struct binder_node *node;
	struct binder_node *last_node = NULL;

	seq_puts(m, "binder state:\n");

	spin_lock(&binder_dead_nodes_lock);
	if (!hlist_empty(&binder_dead_nodes))
		seq_puts(m, "dead nodes:\n");
	hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
		/*
		 * take a temporary reference on the node so it
		 * survives and isn't removed from the list
		 * while we print it.
		 */
		node->tmp_refs++;
		spin_unlock(&binder_dead_nodes_lock);
		if (last_node)
			binder_put_node(last_node);
		binder_node_lock(node);
		print_binder_node_nilocked(m, node);
		binder_node_unlock(node);
		last_node = node;
		spin_lock(&binder_dead_nodes_lock);
	}
	spin_unlock(&binder_dead_nodes_lock);
	if (last_node)
		binder_put_node(last_node);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 1);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_stats_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder stats:\n");

	print_binder_stats(m, "", &binder_stats);

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc_stats(m, proc);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_transactions_show(struct seq_file *m, void *unused)
{
	struct binder_proc *proc;

	seq_puts(m, "binder transactions:\n");
	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(proc, &binder_procs, proc_node)
		print_binder_proc(m, proc, 0);
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static int binder_proc_show(struct seq_file *m, void *unused)
{
	struct binder_proc *itr;
	int pid = (unsigned long)m->private;

	mutex_lock(&binder_procs_lock);
	hlist_for_each_entry(itr, &binder_procs, proc_node) {
		if (itr->pid == pid) {
			seq_puts(m, "binder proc state:\n");
			print_binder_proc(m, itr, 1);
		}
	}
	mutex_unlock(&binder_procs_lock);

	return 0;
}
static void print_binder_transaction_log_entry(struct seq_file *m,
					struct binder_transaction_log_entry *e)
{
	int debug_id = READ_ONCE(e->debug_id_done);
	/*
	 * read barrier to guarantee debug_id_done read before
	 * we print the log values
	 */
	smp_rmb();
	seq_printf(m,
		   "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
		   e->debug_id, (e->call_type == 2) ? "reply" :
		   ((e->call_type == 1) ? "async" : "call "), e->from_proc,
		   e->from_thread, e->to_proc, e->to_thread, e->context_name,
		   e->to_node, e->target_handle, e->data_size, e->offsets_size,
		   e->return_error, e->return_error_param,
		   e->return_error_line);
	/*
	 * read barrier to guarantee read of debug_id_done after
	 * done printing the fields of the entry
	 */
	smp_rmb();
	seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
			"\n" : " (incomplete)\n");
}
static int binder_transaction_log_show(struct seq_file *m, void *unused)
{
	struct binder_transaction_log *log = m->private;
	unsigned int log_cur = atomic_read(&log->cur);
	unsigned int count;
	unsigned int cur;
	int i;

	count = log_cur + 1;
	cur = count < ARRAY_SIZE(log->entry) && !log->full ?
		0 : count % ARRAY_SIZE(log->entry);
	if (count > ARRAY_SIZE(log->entry) || log->full)
		count = ARRAY_SIZE(log->entry);
	for (i = 0; i < count; i++) {
		unsigned int index = cur++ % ARRAY_SIZE(log->entry);

		print_binder_transaction_log_entry(m, &log->entry[index]);
	}
	return 0;
}
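
/*
 * Worked example of the cursor math above (added commentary): with a
 * 32-entry log that has wrapped and log->cur == 40, count starts at
 * 41, the oldest entry is at 41 % 32 == 9, and count is clamped to 32,
 * so the loop prints entries 9..31 followed by 0..8, i.e. oldest to
 * newest.
 */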
static const struct file_operations binder_fops = {
	.owner = THIS_MODULE,
	.poll = binder_poll,
	.unlocked_ioctl = binder_ioctl,
	.compat_ioctl = binder_ioctl,
	.mmap = binder_mmap,
	.open = binder_open,
	.flush = binder_flush,
	.release = binder_release,
};

BINDER_DEBUG_ENTRY(state);
BINDER_DEBUG_ENTRY(stats);
BINDER_DEBUG_ENTRY(transactions);
BINDER_DEBUG_ENTRY(transaction_log);
static int __init init_binder_device(const char *name)
{
	int ret;
	struct binder_device *binder_device;

	binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
	if (!binder_device)
		return -ENOMEM;

	binder_device->miscdev.fops = &binder_fops;
	binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
	binder_device->miscdev.name = name;

	binder_device->context.binder_context_mgr_uid = INVALID_UID;
	binder_device->context.name = name;
	mutex_init(&binder_device->context.context_mgr_node_lock);

	ret = misc_register(&binder_device->miscdev);
	if (ret < 0) {
		kfree(binder_device);
		return ret;
	}

	hlist_add_head(&binder_device->hlist, &binder_devices);

	return ret;
}
static int __init binder_init(void)
{
	int ret;
	char *device_name, *device_names, *device_tmp;
	struct binder_device *device;
	struct hlist_node *tmp;

	binder_alloc_shrinker_init();

	atomic_set(&binder_transaction_log.cur, ~0U);
	atomic_set(&binder_transaction_log_failed.cur, ~0U);

	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
	if (binder_debugfs_dir_entry_root)
		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
						 binder_debugfs_dir_entry_root);

	if (binder_debugfs_dir_entry_root) {
		debugfs_create_file("state",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_state_fops);
		debugfs_create_file("stats",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_stats_fops);
		debugfs_create_file("transactions",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    NULL,
				    &binder_transactions_fops);
		debugfs_create_file("transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log,
				    &binder_transaction_log_fops);
		debugfs_create_file("failed_transaction_log",
				    S_IRUGO,
				    binder_debugfs_dir_entry_root,
				    &binder_transaction_log_failed,
				    &binder_transaction_log_fops);
	}

	/*
	 * Copy the module_parameter string, because we don't want to
	 * tokenize it in-place.
	 */
	device_names = kzalloc(strlen(binder_devices_param) + 1, GFP_KERNEL);
	if (!device_names) {
		ret = -ENOMEM;
		goto err_alloc_device_names_failed;
	}
	strcpy(device_names, binder_devices_param);

	device_tmp = device_names;
	while ((device_name = strsep(&device_tmp, ","))) {
		ret = init_binder_device(device_name);
		if (ret)
			goto err_init_binder_device_failed;
	}

	return ret;

err_init_binder_device_failed:
	hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
		misc_deregister(&device->miscdev);
		hlist_del(&device->hlist);
		kfree(device);
	}

	kfree(device_names);

err_alloc_device_names_failed:
	debugfs_remove_recursive(binder_debugfs_dir_entry_root);

	return ret;
}
5642 #define CREATE_TRACE_POINTS
5643 #include "binder_trace.h"
5645 MODULE_LICENSE("GPL v2");