drivers/android/binder.c
// SPDX-License-Identifier: GPL-2.0-only
/* binder.c
 *
 * Android IPC Subsystem
 *
 * Copyright (C) 2007-2008 Google, Inc.
 */

/*
 * Locking overview
 *
 * There are 3 main spinlocks which must be acquired in the
 * order shown:
 *
 * 1) proc->outer_lock : protects binder_ref
 *    binder_proc_lock() and binder_proc_unlock() are
 *    used to acq/rel.
 * 2) node->lock : protects most fields of binder_node.
 *    binder_node_lock() and binder_node_unlock() are
 *    used to acq/rel
 * 3) proc->inner_lock : protects the thread and node lists
 *    (proc->threads, proc->waiting_threads, proc->nodes)
 *    and all todo lists associated with the binder_proc
 *    (proc->todo, thread->todo, proc->delivered_death and
 *    node->async_todo), as well as thread->transaction_stack.
 *    binder_inner_proc_lock() and binder_inner_proc_unlock()
 *    are used to acq/rel
 *
 * Any lock under procA must never be nested under any lock at the same
 * level or below on procB.
 *
 * Functions that require a lock held on entry indicate the required
 * lock in the suffix of the function name:
 *
 * foo_olocked() : requires proc->outer_lock
 * foo_nlocked() : requires node->lock
 * foo_ilocked() : requires proc->inner_lock
 * foo_oilocked(): requires proc->outer_lock and proc->inner_lock
 * foo_nilocked(): requires node->lock and proc->inner_lock
 * ...
 */
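
/*
 * Illustrative sketch (not part of the driver): a path that needs both
 * proc-level locks must take them in the documented order and release
 * them in reverse. Here "proc" stands for a hypothetical
 * struct binder_proc *; the helpers are defined later in this file.
 *
 *	binder_proc_lock(proc);		// 1) proc->outer_lock
 *	binder_inner_proc_lock(proc);	// 3) proc->inner_lock
 *	... touch proc->todo / proc->threads / binder_ref structures ...
 *	binder_inner_proc_unlock(proc);
 *	binder_proc_unlock(proc);
 */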

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/poll.h>
#include <linux/debugfs.h>
#include <linux/rbtree.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/pid_namespace.h>
#include <linux/security.h>
#include <linux/spinlock.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/task_work.h>
#include <linux/sizes.h>

#include <uapi/linux/android/binder.h>

#include <asm/cacheflush.h>

#include "binder_internal.h"
#include "binder_trace.h"

static HLIST_HEAD(binder_deferred_list);
static DEFINE_MUTEX(binder_deferred_lock);

static HLIST_HEAD(binder_devices);
static HLIST_HEAD(binder_procs);
static DEFINE_MUTEX(binder_procs_lock);

static HLIST_HEAD(binder_dead_nodes);
static DEFINE_SPINLOCK(binder_dead_nodes_lock);

static struct dentry *binder_debugfs_dir_entry_root;
static struct dentry *binder_debugfs_dir_entry_proc;
static atomic_t binder_last_id;

static int proc_show(struct seq_file *m, void *unused);
DEFINE_SHOW_ATTRIBUTE(proc);

#define FORBIDDEN_MMAP_FLAGS                (VM_WRITE)

enum {
        BINDER_DEBUG_USER_ERROR             = 1U << 0,
        BINDER_DEBUG_FAILED_TRANSACTION     = 1U << 1,
        BINDER_DEBUG_DEAD_TRANSACTION       = 1U << 2,
        BINDER_DEBUG_OPEN_CLOSE             = 1U << 3,
        BINDER_DEBUG_DEAD_BINDER            = 1U << 4,
        BINDER_DEBUG_DEATH_NOTIFICATION     = 1U << 5,
        BINDER_DEBUG_READ_WRITE             = 1U << 6,
        BINDER_DEBUG_USER_REFS              = 1U << 7,
        BINDER_DEBUG_THREADS                = 1U << 8,
        BINDER_DEBUG_TRANSACTION            = 1U << 9,
        BINDER_DEBUG_TRANSACTION_COMPLETE   = 1U << 10,
        BINDER_DEBUG_FREE_BUFFER            = 1U << 11,
        BINDER_DEBUG_INTERNAL_REFS          = 1U << 12,
        BINDER_DEBUG_PRIORITY_CAP           = 1U << 13,
        BINDER_DEBUG_SPINLOCKS              = 1U << 14,
};
static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR |
        BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION;
module_param_named(debug_mask, binder_debug_mask, uint, 0644);

char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
module_param_named(devices, binder_devices_param, charp, 0444);

static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
static int binder_stop_on_user_error;

static int binder_set_stop_on_user_error(const char *val,
                                         const struct kernel_param *kp)
{
        int ret;

        ret = param_set_int(val, kp);
        if (binder_stop_on_user_error < 2)
                wake_up(&binder_user_error_wait);
        return ret;
}
module_param_call(stop_on_user_error, binder_set_stop_on_user_error,
        param_get_int, &binder_stop_on_user_error, 0644);

#define binder_debug(mask, x...) \
        do { \
                if (binder_debug_mask & mask) \
                        pr_info_ratelimited(x); \
        } while (0)

#define binder_user_error(x...) \
        do { \
                if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) \
                        pr_info_ratelimited(x); \
                if (binder_stop_on_user_error) \
                        binder_stop_on_user_error = 2; \
        } while (0)
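
/*
 * Usage sketch (illustrative only): the mask argument picks the debug
 * class that gates the print, e.g.
 *
 *	binder_debug(BINDER_DEBUG_OPEN_CLOSE,
 *		     "%d:%d open\n", proc->pid, current->pid);
 *	binder_user_error("%d: bad handle\n", proc->pid);
 *
 * Both prints are rate-limited; binder_user_error() additionally sets
 * binder_stop_on_user_error to 2 when that module parameter is enabled.
 * "proc" here is a hypothetical struct binder_proc *.
 */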

#define to_flat_binder_object(hdr) \
        container_of(hdr, struct flat_binder_object, hdr)

#define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr)

#define to_binder_buffer_object(hdr) \
        container_of(hdr, struct binder_buffer_object, hdr)

#define to_binder_fd_array_object(hdr) \
        container_of(hdr, struct binder_fd_array_object, hdr)

static struct binder_stats binder_stats;

static inline void binder_stats_deleted(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_deleted[type]);
}

static inline void binder_stats_created(enum binder_stat_types type)
{
        atomic_inc(&binder_stats.obj_created[type]);
}

struct binder_transaction_log_entry {
        int debug_id;
        int debug_id_done;
        int call_type;
        int from_proc;
        int from_thread;
        int target_handle;
        int to_proc;
        int to_thread;
        int to_node;
        int data_size;
        int offsets_size;
        int return_error_line;
        uint32_t return_error;
        uint32_t return_error_param;
        char context_name[BINDERFS_MAX_NAME + 1];
};

struct binder_transaction_log {
        atomic_t cur;
        bool full;
        struct binder_transaction_log_entry entry[32];
};

static struct binder_transaction_log binder_transaction_log;
static struct binder_transaction_log binder_transaction_log_failed;

static struct binder_transaction_log_entry *binder_transaction_log_add(
        struct binder_transaction_log *log)
{
        struct binder_transaction_log_entry *e;
        unsigned int cur = atomic_inc_return(&log->cur);

        if (cur >= ARRAY_SIZE(log->entry))
                log->full = true;
        e = &log->entry[cur % ARRAY_SIZE(log->entry)];
        WRITE_ONCE(e->debug_id_done, 0);
        /*
         * write-barrier to synchronize access to e->debug_id_done.
         * We make sure the initialized 0 value is seen before
         * the other fields are zeroed by the memset() below.
         */
        smp_wmb();
        memset(e, 0, sizeof(*e));
        return e;
}
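
/*
 * Read-side sketch (illustrative only): a consumer of this log pairs
 * the smp_wmb() above with an smp_rmb(), sampling debug_id_done around
 * the copy to detect a racing writer.
 *
 *	int done = READ_ONCE(e->debug_id_done);
 *
 *	smp_rmb();
 *	print_entry(e);			// hypothetical helper
 *	if (!done || done != READ_ONCE(e->debug_id_done))
 *		mark_incomplete();	// hypothetical helper
 */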

enum binder_deferred_state {
        BINDER_DEFERRED_FLUSH        = 0x01,
        BINDER_DEFERRED_RELEASE      = 0x02,
};

enum {
        BINDER_LOOPER_STATE_REGISTERED  = 0x01,
        BINDER_LOOPER_STATE_ENTERED     = 0x02,
        BINDER_LOOPER_STATE_EXITED      = 0x04,
        BINDER_LOOPER_STATE_INVALID     = 0x08,
        BINDER_LOOPER_STATE_WAITING     = 0x10,
        BINDER_LOOPER_STATE_POLL        = 0x20,
};

/**
 * binder_proc_lock() - Acquire outer lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->outer_lock. Used to protect binder_ref
 * structures associated with the given proc.
 */
#define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__)
static void
_binder_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->outer_lock);
}

/**
 * binder_proc_unlock() - Release spinlock for given binder_proc
 * @proc:         struct binder_proc whose outer lock to release
 *
 * Release lock acquired via binder_proc_lock()
 */
#define binder_proc_unlock(_proc) _binder_proc_unlock(_proc, __LINE__)
static void
_binder_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->outer_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->outer_lock);
}

/**
 * binder_inner_proc_lock() - Acquire inner lock for given binder_proc
 * @proc:         struct binder_proc to acquire
 *
 * Acquires proc->inner_lock. Used to protect todo lists
 */
#define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__)
static void
_binder_inner_proc_lock(struct binder_proc *proc, int line)
        __acquires(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&proc->inner_lock);
}

/**
 * binder_inner_proc_unlock() - Release inner lock for given binder_proc
 * @proc:         struct binder_proc whose inner lock to release
 *
 * Release lock acquired via binder_inner_proc_lock()
 */
#define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__)
static void
_binder_inner_proc_unlock(struct binder_proc *proc, int line)
        __releases(&proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&proc->inner_lock);
}

/**
 * binder_node_lock() - Acquire spinlock for given binder_node
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. Used to protect binder_node fields
 */
#define binder_node_lock(node) _binder_node_lock(node, __LINE__)
static void
_binder_node_lock(struct binder_node *node, int line)
        __acquires(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
}

/**
 * binder_node_unlock() - Release spinlock for given binder_node
 * @node:         struct binder_node whose lock to release
 *
 * Release lock acquired via binder_node_lock()
 */
#define binder_node_unlock(node) _binder_node_unlock(node, __LINE__)
static void
_binder_node_unlock(struct binder_node *node, int line)
        __releases(&node->lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_unlock(&node->lock);
}

/**
 * binder_node_inner_lock() - Acquire node and inner locks
 * @node:         struct binder_node to acquire
 *
 * Acquires node->lock. If node->proc is non-NULL, also acquires
 * node->proc->inner_lock. Used to protect binder_node fields
 */
#define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__)
static void
_binder_node_inner_lock(struct binder_node *node, int line)
        __acquires(&node->lock) __acquires(&node->proc->inner_lock)
{
        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        spin_lock(&node->lock);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                /* annotation for sparse */
                __acquire(&node->proc->inner_lock);
}

/**
 * binder_node_inner_unlock() - Release node and inner locks
 * @node:         struct binder_node whose locks to release
 *
 * Release locks acquired via binder_node_inner_lock()
 */
#define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__)
static void
_binder_node_inner_unlock(struct binder_node *node, int line)
        __releases(&node->lock) __releases(&node->proc->inner_lock)
{
        struct binder_proc *proc = node->proc;

        binder_debug(BINDER_DEBUG_SPINLOCKS,
                     "%s: line=%d\n", __func__, line);
        if (proc)
                binder_inner_proc_unlock(proc);
        else
                /* annotation for sparse */
                __release(&node->proc->inner_lock);
        spin_unlock(&node->lock);
}

static bool binder_worklist_empty_ilocked(struct list_head *list)
{
        return list_empty(list);
}

/**
 * binder_worklist_empty() - Check if no items on the work list
 * @proc:       binder_proc associated with list
 * @list:       list to check
 *
 * Return: true if there are no items on list, else false
 */
static bool binder_worklist_empty(struct binder_proc *proc,
                                  struct list_head *list)
{
        bool ret;

        binder_inner_proc_lock(proc);
        ret = binder_worklist_empty_ilocked(list);
        binder_inner_proc_unlock(proc);
        return ret;
}

/**
 * binder_enqueue_work_ilocked() - Add an item to the work list
 * @work:         struct binder_work to add to list
 * @target_list:  list to add work to
 *
 * Adds the work to the specified list. Asserts that work
 * is not already on a list.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_work_ilocked(struct binder_work *work,
                           struct list_head *target_list)
{
        BUG_ON(target_list == NULL);
        BUG_ON(work->entry.next && !list_empty(&work->entry));
        list_add_tail(&work->entry, target_list);
}

/**
 * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread. Doesn't set the process_todo
 * flag, which means that (if it wasn't already set) the thread will go to
 * sleep without handling this work when it calls read.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread,
                                            struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
}

/**
 * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 *
 * Requires the proc->inner_lock to be held.
 */
static void
binder_enqueue_thread_work_ilocked(struct binder_thread *thread,
                                   struct binder_work *work)
{
        WARN_ON(!list_empty(&thread->waiting_thread_node));
        binder_enqueue_work_ilocked(work, &thread->todo);
        thread->process_todo = true;
}

/**
 * binder_enqueue_thread_work() - Add an item to the thread work list
 * @thread:       thread to queue work to
 * @work:         struct binder_work to add to list
 *
 * Adds the work to the todo list of the thread, and enables processing
 * of the todo queue.
 */
static void
binder_enqueue_thread_work(struct binder_thread *thread,
                           struct binder_work *work)
{
        binder_inner_proc_lock(thread->proc);
        binder_enqueue_thread_work_ilocked(thread, work);
        binder_inner_proc_unlock(thread->proc);
}
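
/*
 * Contrast sketch (illustrative only): the plain enqueue marks the
 * thread as having work to process, while the deferred variant leaves
 * process_todo clear, so a reader may go to sleep without consuming
 * the item first. Note the _ilocked variant requires proc->inner_lock
 * to already be held; "w" is a hypothetical struct binder_work *.
 *
 *	binder_enqueue_thread_work(thread, w);			// sets process_todo
 *	binder_enqueue_deferred_thread_work_ilocked(thread, w);	// does not
 */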

static void
binder_dequeue_work_ilocked(struct binder_work *work)
{
        list_del_init(&work->entry);
}

/**
 * binder_dequeue_work() - Removes an item from the work list
 * @proc:         binder_proc associated with list
 * @work:         struct binder_work to remove from list
 *
 * Removes the specified work item from whatever list it is on.
 * Can safely be called if work is not on any list.
 */
static void
binder_dequeue_work(struct binder_proc *proc, struct binder_work *work)
{
        binder_inner_proc_lock(proc);
        binder_dequeue_work_ilocked(work);
        binder_inner_proc_unlock(proc);
}

static struct binder_work *binder_dequeue_work_head_ilocked(
                                        struct list_head *list)
{
        struct binder_work *w;

        w = list_first_entry_or_null(list, struct binder_work, entry);
        if (w)
                list_del_init(&w->entry);
        return w;
}

static void
binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer);
static void binder_free_thread(struct binder_thread *thread);
static void binder_free_proc(struct binder_proc *proc);
static void binder_inc_node_tmpref_ilocked(struct binder_node *node);

static bool binder_has_work_ilocked(struct binder_thread *thread,
                                    bool do_proc_work)
{
        return thread->process_todo ||
                thread->looper_need_return ||
                (do_proc_work &&
                 !binder_worklist_empty_ilocked(&thread->proc->todo));
}

static bool binder_has_work(struct binder_thread *thread, bool do_proc_work)
{
        bool has_work;

        binder_inner_proc_lock(thread->proc);
        has_work = binder_has_work_ilocked(thread, do_proc_work);
        binder_inner_proc_unlock(thread->proc);

        return has_work;
}

static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread)
{
        return !thread->transaction_stack &&
                binder_worklist_empty_ilocked(&thread->todo) &&
                (thread->looper & (BINDER_LOOPER_STATE_ENTERED |
                                   BINDER_LOOPER_STATE_REGISTERED));
}

static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc,
                                               bool sync)
{
        struct rb_node *n;
        struct binder_thread *thread;

        for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
                thread = rb_entry(n, struct binder_thread, rb_node);
                if (thread->looper & BINDER_LOOPER_STATE_POLL &&
                    binder_available_for_proc_work_ilocked(thread)) {
                        if (sync)
                                wake_up_interruptible_sync(&thread->wait);
                        else
                                wake_up_interruptible(&thread->wait);
                }
        }
}

/**
 * binder_select_thread_ilocked() - selects a thread for doing proc work.
 * @proc:       process to select a thread from
 *
 * Note that calling this function moves the thread off the waiting_threads
 * list, so it can only be woken up by the caller of this function, or a
 * signal. Therefore, callers *should* always wake up the thread this function
 * returns.
 *
 * Return:      If there's a thread currently waiting for process work,
 *              returns that thread. Otherwise returns NULL.
 */
static struct binder_thread *
binder_select_thread_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread;

        assert_spin_locked(&proc->inner_lock);
        thread = list_first_entry_or_null(&proc->waiting_threads,
                                          struct binder_thread,
                                          waiting_thread_node);

        if (thread)
                list_del_init(&thread->waiting_thread_node);

        return thread;
}

/**
 * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work.
 * @proc:       process to wake up a thread in
 * @thread:     specific thread to wake-up (may be NULL)
 * @sync:       whether to do a synchronous wake-up
 *
 * This function wakes up a thread in the @proc process.
 * The caller may provide a specific thread to wake-up in
 * the @thread parameter. If @thread is NULL, this function
 * will wake up threads that have called poll().
 *
 * Note that for this function to work as expected, callers
 * should first call binder_select_thread() to find a thread
 * to handle the work (if they don't have a thread already),
 * and pass the result into the @thread parameter.
 */
static void binder_wakeup_thread_ilocked(struct binder_proc *proc,
                                         struct binder_thread *thread,
                                         bool sync)
{
        assert_spin_locked(&proc->inner_lock);

        if (thread) {
                if (sync)
                        wake_up_interruptible_sync(&thread->wait);
                else
                        wake_up_interruptible(&thread->wait);
                return;
        }

        /* Didn't find a thread waiting for proc work; this can happen
         * in two scenarios:
         * 1. All threads are busy handling transactions
         *    In that case, one of those threads should call back into
         *    the kernel driver soon and pick up this work.
         * 2. Threads are using the (e)poll interface, in which case
         *    they may be blocked on the waitqueue without having been
         *    added to waiting_threads. For this case, we just iterate
         *    over all threads not handling transaction work, and
         *    wake them all up. We wake all because we don't know whether
         *    a thread that called into (e)poll is handling non-binder
         *    work currently.
         */
        binder_wakeup_poll_threads_ilocked(proc, sync);
}

static void binder_wakeup_proc_ilocked(struct binder_proc *proc)
{
        struct binder_thread *thread = binder_select_thread_ilocked(proc);

        binder_wakeup_thread_ilocked(proc, thread, /* sync = */false);
}

static void binder_set_nice(long nice)
{
        long min_nice;

        if (can_nice(current, nice)) {
                set_user_nice(current, nice);
                return;
        }
        min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE));
        binder_debug(BINDER_DEBUG_PRIORITY_CAP,
                     "%d: nice value %ld not allowed use %ld instead\n",
                      current->pid, nice, min_nice);
        set_user_nice(current, min_nice);
        if (min_nice <= MAX_NICE)
                return;
        binder_user_error("%d RLIMIT_NICE not set\n", current->pid);
}

static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc,
                                                   binder_uintptr_t ptr)
{
        struct rb_node *n = proc->nodes.rb_node;
        struct binder_node *node;

        assert_spin_locked(&proc->inner_lock);

        while (n) {
                node = rb_entry(n, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        n = n->rb_left;
                else if (ptr > node->ptr)
                        n = n->rb_right;
                else {
                        /*
                         * take an implicit weak reference
                         * to ensure node stays alive until
                         * call to binder_put_node()
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        return NULL;
}

static struct binder_node *binder_get_node(struct binder_proc *proc,
                                           binder_uintptr_t ptr)
{
        struct binder_node *node;

        binder_inner_proc_lock(proc);
        node = binder_get_node_ilocked(proc, ptr);
        binder_inner_proc_unlock(proc);
        return node;
}

static struct binder_node *binder_init_node_ilocked(
                                                struct binder_proc *proc,
                                                struct binder_node *new_node,
                                                struct flat_binder_object *fp)
{
        struct rb_node **p = &proc->nodes.rb_node;
        struct rb_node *parent = NULL;
        struct binder_node *node;
        binder_uintptr_t ptr = fp ? fp->binder : 0;
        binder_uintptr_t cookie = fp ? fp->cookie : 0;
        __u32 flags = fp ? fp->flags : 0;

        assert_spin_locked(&proc->inner_lock);

        while (*p) {

                parent = *p;
                node = rb_entry(parent, struct binder_node, rb_node);

                if (ptr < node->ptr)
                        p = &(*p)->rb_left;
                else if (ptr > node->ptr)
                        p = &(*p)->rb_right;
                else {
                        /*
                         * A matching node is already in
                         * the rb tree. Abandon the init
                         * and return it.
                         */
                        binder_inc_node_tmpref_ilocked(node);
                        return node;
                }
        }
        node = new_node;
        binder_stats_created(BINDER_STAT_NODE);
        node->tmp_refs++;
        rb_link_node(&node->rb_node, parent, p);
        rb_insert_color(&node->rb_node, &proc->nodes);
        node->debug_id = atomic_inc_return(&binder_last_id);
        node->proc = proc;
        node->ptr = ptr;
        node->cookie = cookie;
        node->work.type = BINDER_WORK_NODE;
        node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
        node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
        node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX);
        spin_lock_init(&node->lock);
        INIT_LIST_HEAD(&node->work.entry);
        INIT_LIST_HEAD(&node->async_todo);
        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d:%d node %d u%016llx c%016llx created\n",
                     proc->pid, current->pid, node->debug_id,
                     (u64)node->ptr, (u64)node->cookie);

        return node;
}

static struct binder_node *binder_new_node(struct binder_proc *proc,
                                           struct flat_binder_object *fp)
{
        struct binder_node *node;
        struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL);

        if (!new_node)
                return NULL;
        binder_inner_proc_lock(proc);
        node = binder_init_node_ilocked(proc, new_node, fp);
        binder_inner_proc_unlock(proc);
        if (node != new_node)
                /*
                 * The node was already added by another thread
                 */
                kfree(new_node);

        return node;
}

static void binder_free_node(struct binder_node *node)
{
        kfree(node);
        binder_stats_deleted(BINDER_STAT_NODE);
}

static int binder_inc_node_nilocked(struct binder_node *node, int strong,
                                    int internal,
                                    struct list_head *target_list)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal) {
                        if (target_list == NULL &&
                            node->internal_strong_refs == 0 &&
                            !(node->proc &&
                              node == node->proc->context->binder_context_mgr_node &&
                              node->has_strong_ref)) {
                                pr_err("invalid inc strong node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        node->internal_strong_refs++;
                } else
                        node->local_strong_refs++;
                if (!node->has_strong_ref && target_list) {
                        struct binder_thread *thread = container_of(target_list,
                                                    struct binder_thread, todo);
                        binder_dequeue_work_ilocked(&node->work);
                        BUG_ON(&thread->todo != target_list);
                        binder_enqueue_deferred_thread_work_ilocked(thread,
                                                                   &node->work);
                }
        } else {
                if (!internal)
                        node->local_weak_refs++;
                if (!node->has_weak_ref && list_empty(&node->work.entry)) {
                        if (target_list == NULL) {
                                pr_err("invalid inc weak node for %d\n",
                                        node->debug_id);
                                return -EINVAL;
                        }
                        /*
                         * See comment above
                         */
                        binder_enqueue_work_ilocked(&node->work, target_list);
                }
        }
        return 0;
}

static int binder_inc_node(struct binder_node *node, int strong, int internal,
                           struct list_head *target_list)
{
        int ret;

        binder_node_inner_lock(node);
        ret = binder_inc_node_nilocked(node, strong, internal, target_list);
        binder_node_inner_unlock(node);

        return ret;
}

static bool binder_dec_node_nilocked(struct binder_node *node,
                                     int strong, int internal)
{
        struct binder_proc *proc = node->proc;

        assert_spin_locked(&node->lock);
        if (proc)
                assert_spin_locked(&proc->inner_lock);
        if (strong) {
                if (internal)
                        node->internal_strong_refs--;
                else
                        node->local_strong_refs--;
                if (node->local_strong_refs || node->internal_strong_refs)
                        return false;
        } else {
                if (!internal)
                        node->local_weak_refs--;
                if (node->local_weak_refs || node->tmp_refs ||
                                !hlist_empty(&node->refs))
                        return false;
        }

        if (proc && (node->has_strong_ref || node->has_weak_ref)) {
                if (list_empty(&node->work.entry)) {
                        binder_enqueue_work_ilocked(&node->work, &proc->todo);
                        binder_wakeup_proc_ilocked(proc);
                }
        } else {
                if (hlist_empty(&node->refs) && !node->local_strong_refs &&
                    !node->local_weak_refs && !node->tmp_refs) {
                        if (proc) {
                                binder_dequeue_work_ilocked(&node->work);
                                rb_erase(&node->rb_node, &proc->nodes);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "refless node %d deleted\n",
                                             node->debug_id);
                        } else {
                                BUG_ON(!list_empty(&node->work.entry));
                                spin_lock(&binder_dead_nodes_lock);
                                /*
                                 * tmp_refs could have changed so
                                 * check it again
                                 */
                                if (node->tmp_refs) {
                                        spin_unlock(&binder_dead_nodes_lock);
                                        return false;
                                }
                                hlist_del(&node->dead_node);
                                spin_unlock(&binder_dead_nodes_lock);
                                binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                                             "dead node %d deleted\n",
                                             node->debug_id);
                        }
                        return true;
                }
        }
        return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
        bool free_node;

        binder_node_inner_lock(node);
        free_node = binder_dec_node_nilocked(node, strong, internal);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
        /*
         * No call to binder_inc_node() is needed since we
         * don't need to inform userspace of any changes to
         * tmp_refs
         */
        node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:       node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list to
 * print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
        binder_node_lock(node);
        if (node->proc)
                binder_inner_proc_lock(node->proc);
        else
                spin_lock(&binder_dead_nodes_lock);
        binder_inc_node_tmpref_ilocked(node);
        if (node->proc)
                binder_inner_proc_unlock(node->proc);
        else
                spin_unlock(&binder_dead_nodes_lock);
        binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:       node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
        bool free_node;

        binder_node_inner_lock(node);
        if (!node->proc)
                spin_lock(&binder_dead_nodes_lock);
        else
                __acquire(&binder_dead_nodes_lock);
        node->tmp_refs--;
        BUG_ON(node->tmp_refs < 0);
        if (!node->proc)
                spin_unlock(&binder_dead_nodes_lock);
        else
                __release(&binder_dead_nodes_lock);
        /*
         * Call binder_dec_node_nilocked() to check if all refcounts
         * are 0 and cleanup is needed. Calling with strong=0 and
         * internal=1 causes no actual reference to be released.
         * If that changes, a change is needed here too.
         */
        free_node = binder_dec_node_nilocked(node, 0, 1);
        binder_node_inner_unlock(node);
        if (free_node)
                binder_free_node(node);
}

static void binder_put_node(struct binder_node *node)
{
        binder_dec_node_tmpref(node);
}
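
/*
 * Pairing sketch (illustrative only): lookups such as binder_get_node()
 * return the node with an implicit tmp_refs reference taken, which the
 * caller must drop via binder_put_node() once done.
 *
 *	struct binder_node *node = binder_get_node(proc, ptr);
 *
 *	if (node) {
 *		... use node; it cannot be freed while tmp_refs > 0 ...
 *		binder_put_node(node);
 *	}
 */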

static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc,
                                                 u32 desc, bool need_strong_ref)
{
        struct rb_node *n = proc->refs_by_desc.rb_node;
        struct binder_ref *ref;

        while (n) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);

                if (desc < ref->data.desc) {
                        n = n->rb_left;
                } else if (desc > ref->data.desc) {
                        n = n->rb_right;
                } else if (need_strong_ref && !ref->data.strong) {
                        binder_user_error("tried to use weak ref as strong ref\n");
                        return NULL;
                } else {
                        return ref;
                }
        }
        return NULL;
}

/**
 * binder_get_ref_for_node_olocked() - get the ref associated with given node
 * @proc:       binder_proc that owns the ref
 * @node:       binder_node of target
 * @new_ref:    newly allocated binder_ref to be initialized or %NULL
 *
 * Look up the ref for the given node and return it if it exists
 *
 * If it doesn't exist and the caller provides a newly allocated
 * ref, initialize the fields of the newly allocated ref and insert
 * into the given proc rb_trees and node refs list.
 *
 * Return:      the ref for node. It is possible that another thread
 *              allocated/initialized the ref first in which case the
 *              returned ref would be different than the passed-in
 *              new_ref. new_ref must be kfree'd by the caller in
 *              this case.
 */
static struct binder_ref *binder_get_ref_for_node_olocked(
                                        struct binder_proc *proc,
                                        struct binder_node *node,
                                        struct binder_ref *new_ref)
{
        struct binder_context *context = proc->context;
        struct rb_node **p = &proc->refs_by_node.rb_node;
        struct rb_node *parent = NULL;
        struct binder_ref *ref;
        struct rb_node *n;

        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_node);

                if (node < ref->node)
                        p = &(*p)->rb_left;
                else if (node > ref->node)
                        p = &(*p)->rb_right;
                else
                        return ref;
        }
        if (!new_ref)
                return NULL;

        binder_stats_created(BINDER_STAT_REF);
        new_ref->data.debug_id = atomic_inc_return(&binder_last_id);
        new_ref->proc = proc;
        new_ref->node = node;
        rb_link_node(&new_ref->rb_node_node, parent, p);
        rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node);

        new_ref->data.desc = (node == context->binder_context_mgr_node) ? 0 : 1;
        for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
                ref = rb_entry(n, struct binder_ref, rb_node_desc);
                if (ref->data.desc > new_ref->data.desc)
                        break;
                new_ref->data.desc = ref->data.desc + 1;
        }

        p = &proc->refs_by_desc.rb_node;
        while (*p) {
                parent = *p;
                ref = rb_entry(parent, struct binder_ref, rb_node_desc);

                if (new_ref->data.desc < ref->data.desc)
                        p = &(*p)->rb_left;
                else if (new_ref->data.desc > ref->data.desc)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }
        rb_link_node(&new_ref->rb_node_desc, parent, p);
        rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc);

        binder_node_lock(node);
        hlist_add_head(&new_ref->node_entry, &node->refs);

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d new ref %d desc %d for node %d\n",
                      proc->pid, new_ref->data.debug_id, new_ref->data.desc,
                      node->debug_id);
        binder_node_unlock(node);
        return new_ref;
}

static void binder_cleanup_ref_olocked(struct binder_ref *ref)
{
        bool delete_node = false;

        binder_debug(BINDER_DEBUG_INTERNAL_REFS,
                     "%d delete ref %d desc %d for node %d\n",
                      ref->proc->pid, ref->data.debug_id, ref->data.desc,
                      ref->node->debug_id);

        rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc);
        rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node);

        binder_node_inner_lock(ref->node);
        if (ref->data.strong)
                binder_dec_node_nilocked(ref->node, 1, 1);

        hlist_del(&ref->node_entry);
        delete_node = binder_dec_node_nilocked(ref->node, 0, 1);
        binder_node_inner_unlock(ref->node);
        /*
         * Clear ref->node unless we want the caller to free the node
         */
        if (!delete_node) {
                /*
                 * The caller uses ref->node to determine
                 * whether the node needs to be freed. Clear
                 * it since the node is still alive.
                 */
                ref->node = NULL;
        }

        if (ref->death) {
                binder_debug(BINDER_DEBUG_DEAD_BINDER,
                             "%d delete ref %d desc %d has death notification\n",
                              ref->proc->pid, ref->data.debug_id,
                              ref->data.desc);
                binder_dequeue_work(ref->proc, &ref->death->work);
                binder_stats_deleted(BINDER_STAT_DEATH);
        }
        binder_stats_deleted(BINDER_STAT_REF);
}

/**
 * binder_inc_ref_olocked() - increment the ref for given handle
 * @ref:         ref to be incremented
 * @strong:      if true, strong increment, else weak
 * @target_list: list to queue node work on
 *
 * Increment the ref. @ref->proc->outer_lock must be held on entry
 *
 * Return: 0, if successful, else errno
 */
static int binder_inc_ref_olocked(struct binder_ref *ref, int strong,
                                  struct list_head *target_list)
{
        int ret;

        if (strong) {
                if (ref->data.strong == 0) {
                        ret = binder_inc_node(ref->node, 1, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.strong++;
        } else {
                if (ref->data.weak == 0) {
                        ret = binder_inc_node(ref->node, 0, 1, target_list);
                        if (ret)
                                return ret;
                }
                ref->data.weak++;
        }
        return 0;
}

/**
 * binder_dec_ref_olocked() - dec the ref for given handle
 * @ref:        ref to be decremented
 * @strong:     if true, strong decrement, else weak
 *
 * Decrement the ref. @ref->proc->outer_lock must be held on entry.
 *
 * Return: true if ref is cleaned up and ready to be freed
 */
static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong)
{
        if (strong) {
                if (ref->data.strong == 0) {
                        binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.strong--;
                if (ref->data.strong == 0)
                        binder_dec_node(ref->node, strong, 1);
        } else {
                if (ref->data.weak == 0) {
                        binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n",
                                          ref->proc->pid, ref->data.debug_id,
                                          ref->data.desc, ref->data.strong,
                                          ref->data.weak);
                        return false;
                }
                ref->data.weak--;
        }
        if (ref->data.strong == 0 && ref->data.weak == 0) {
                binder_cleanup_ref_olocked(ref);
                return true;
        }
        return false;
}

/**
 * binder_get_node_from_ref() - get the node from the given proc/desc
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @need_strong_ref: if true, only return node if ref is strong
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, return the associated binder_node
 *
 * Return: a binder_node or NULL if not found or not strong when strong required
 */
static struct binder_node *binder_get_node_from_ref(
                struct binder_proc *proc,
                u32 desc, bool need_strong_ref,
                struct binder_ref_data *rdata)
{
        struct binder_node *node;
        struct binder_ref *ref;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, need_strong_ref);
        if (!ref)
                goto err_no_ref;
        node = ref->node;
        /*
         * Take an implicit reference on the node to ensure
         * it stays alive until the call to binder_put_node()
         */
        binder_inc_node_tmpref(node);
        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        return node;

err_no_ref:
        binder_proc_unlock(proc);
        return NULL;
}

/**
 * binder_free_ref() - free the binder_ref
 * @ref:        ref to free
 *
 * Free the binder_ref. Free the binder_node indicated by ref->node
 * (if non-NULL) and the binder_ref_death indicated by ref->death.
 */
static void binder_free_ref(struct binder_ref *ref)
{
        if (ref->node)
                binder_free_node(ref->node);
        kfree(ref->death);
        kfree(ref);
}

/**
 * binder_update_ref_for_handle() - inc/dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @increment:  true=inc reference, false=dec reference
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Given a proc and ref handle, increment or decrement the ref
 * according to "increment" arg.
 *
 * Return: 0 if successful, else errno
 */
static int binder_update_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool increment, bool strong,
                struct binder_ref_data *rdata)
{
        int ret = 0;
        struct binder_ref *ref;
        bool delete_ref = false;

        binder_proc_lock(proc);
        ref = binder_get_ref_olocked(proc, desc, strong);
        if (!ref) {
                ret = -EINVAL;
                goto err_no_ref;
        }
        if (increment)
                ret = binder_inc_ref_olocked(ref, strong, NULL);
        else
                delete_ref = binder_dec_ref_olocked(ref, strong);

        if (rdata)
                *rdata = ref->data;
        binder_proc_unlock(proc);

        if (delete_ref)
                binder_free_ref(ref);
        return ret;

err_no_ref:
        binder_proc_unlock(proc);
        return ret;
}

/**
 * binder_dec_ref_for_handle() - dec the ref for given handle
 * @proc:       proc containing the ref
 * @desc:       the handle associated with the ref
 * @strong:     true=strong reference, false=weak reference
 * @rdata:      the id/refcount data for the ref
 *
 * Just calls binder_update_ref_for_handle() to decrement the ref.
 *
 * Return: 0 if successful, else errno
 */
static int binder_dec_ref_for_handle(struct binder_proc *proc,
                uint32_t desc, bool strong, struct binder_ref_data *rdata)
{
        return binder_update_ref_for_handle(proc, desc, false, strong, rdata);
}

/**
 * binder_inc_ref_for_node() - increment the ref for given proc/node
 * @proc:        proc containing the ref
 * @node:        target node
 * @strong:      true=strong reference, false=weak reference
 * @target_list: worklist to use if node is incremented
 * @rdata:       the id/refcount data for the ref
 *
 * Given a proc and node, increment the ref. Create the ref if it
 * doesn't already exist
 *
 * Return: 0 if successful, else errno
 */
static int binder_inc_ref_for_node(struct binder_proc *proc,
                        struct binder_node *node,
                        bool strong,
                        struct list_head *target_list,
                        struct binder_ref_data *rdata)
{
        struct binder_ref *ref;
        struct binder_ref *new_ref = NULL;
        int ret = 0;

        binder_proc_lock(proc);
        ref = binder_get_ref_for_node_olocked(proc, node, NULL);
        if (!ref) {
                binder_proc_unlock(proc);
                new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
                if (!new_ref)
                        return -ENOMEM;
                binder_proc_lock(proc);
                ref = binder_get_ref_for_node_olocked(proc, node, new_ref);
        }
        ret = binder_inc_ref_olocked(ref, strong, target_list);
        *rdata = ref->data;
        if (ret && ref == new_ref) {
                /*
                 * Clean up the failed reference here as the target
                 * could now be dead and have already released its
                 * references by now. Cleaning up the new reference
                 * with strong=0 while a tmp_ref is still held will
                 * not decrement the node. The new_ref gets kfree'd below.
                 */
                binder_cleanup_ref_olocked(new_ref);
                ref = NULL;
        }

        binder_proc_unlock(proc);
        if (new_ref && ref != new_ref)
                /*
                 * Another thread created the ref first so
                 * free the one we allocated
                 */
                kfree(new_ref);
        return ret;
}

static void binder_pop_transaction_ilocked(struct binder_thread *target_thread,
                                           struct binder_transaction *t)
{
        BUG_ON(!target_thread);
        assert_spin_locked(&target_thread->proc->inner_lock);
        BUG_ON(target_thread->transaction_stack != t);
        BUG_ON(target_thread->transaction_stack->from != target_thread);
        target_thread->transaction_stack =
                target_thread->transaction_stack->from_parent;
        t->from = NULL;
}

/**
 * binder_thread_dec_tmpref() - decrement thread->tmp_ref
 * @thread:     thread to decrement
 *
 * A thread needs to be kept alive while being used to create or
 * handle a transaction. binder_get_txn_from() is used to safely
 * extract t->from from a binder_transaction and keep the thread
 * indicated by t->from from being freed. When done with that
 * binder_thread, this function is called to decrement the
 * tmp_ref and free if appropriate (thread has been released
 * and no transaction being processed by the driver)
 */
static void binder_thread_dec_tmpref(struct binder_thread *thread)
{
        /*
         * atomic is used to protect the counter value while
         * it cannot reach zero or thread->is_dead is false
         */
        binder_inner_proc_lock(thread->proc);
        atomic_dec(&thread->tmp_ref);
        if (thread->is_dead && !atomic_read(&thread->tmp_ref)) {
                binder_inner_proc_unlock(thread->proc);
                binder_free_thread(thread);
                return;
        }
        binder_inner_proc_unlock(thread->proc);
}

/**
 * binder_proc_dec_tmpref() - decrement proc->tmp_ref
 * @proc:       proc to decrement
 *
 * A binder_proc needs to be kept alive while being used to create or
 * handle a transaction. proc->tmp_ref is incremented when
 * creating a new transaction or the binder_proc is currently in-use
 * by threads that are being released. When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
1434  */
1435 static void binder_proc_dec_tmpref(struct binder_proc *proc)
1436 {
1437         binder_inner_proc_lock(proc);
1438         proc->tmp_ref--;
1439         if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
1440                         !proc->tmp_ref) {
1441                 binder_inner_proc_unlock(proc);
1442                 binder_free_proc(proc);
1443                 return;
1444         }
1445         binder_inner_proc_unlock(proc);
1446 }
1447
1448 /**
1449  * binder_get_txn_from() - safely extract the "from" thread in transaction
1450  * @t:  binder transaction for t->from
1451  *
1452  * Atomically return the "from" thread and increment the tmp_ref
1453  * count for the thread to ensure it stays alive until
1454  * binder_thread_dec_tmpref() is called.
1455  *
1456  * Return: the value of t->from
1457  */
1458 static struct binder_thread *binder_get_txn_from(
1459                 struct binder_transaction *t)
1460 {
1461         struct binder_thread *from;
1462
1463         spin_lock(&t->lock);
1464         from = t->from;
1465         if (from)
1466                 atomic_inc(&from->tmp_ref);
1467         spin_unlock(&t->lock);
1468         return from;
1469 }
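/*
 * Illustrative sketch (not part of the driver): the usual pairing of
 * binder_get_txn_from() with binder_thread_dec_tmpref(); do_work() is
 * a hypothetical placeholder for whatever the caller does while the
 * thread is pinned.
 *
 *	struct binder_thread *from = binder_get_txn_from(t);
 *
 *	if (from) {
 *		do_work(from);			// cannot be freed here
 *		binder_thread_dec_tmpref(from);	// may free the thread
 *	}
 */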
1470
1471 /**
1472  * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
1473  * @t:  binder transaction for t->from
1474  *
1475  * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
1476  * to guarantee that the thread cannot be released while operating on it.
1477  * The caller must call binder_inner_proc_unlock() to release the inner lock
1478  * as well as call binder_thread_dec_tmpref() to release the reference.
1479  *
1480  * Return: the value of t->from
1481  */
1482 static struct binder_thread *binder_get_txn_from_and_acq_inner(
1483                 struct binder_transaction *t)
1484         __acquires(&t->from->proc->inner_lock)
1485 {
1486         struct binder_thread *from;
1487
1488         from = binder_get_txn_from(t);
1489         if (!from) {
1490                 __acquire(&from->proc->inner_lock);
1491                 return NULL;
1492         }
1493         binder_inner_proc_lock(from->proc);
1494         if (t->from) {
1495                 BUG_ON(from != t->from);
1496                 return from;
1497         }
1498         binder_inner_proc_unlock(from->proc);
1499         __acquire(&from->proc->inner_lock);
1500         binder_thread_dec_tmpref(from);
1501         return NULL;
1502 }
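/*
 * Illustrative caller pattern (sketch only) for the contract described
 * above: release the inner lock first, then drop the tmp_ref; the
 * "..." stands for work done while proc->inner_lock is held.
 *
 *	target_thread = binder_get_txn_from_and_acq_inner(t);
 *	if (target_thread) {
 *		...
 *		binder_inner_proc_unlock(target_thread->proc);
 *		binder_thread_dec_tmpref(target_thread);
 *	}
 */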
1503
1504 /**
1505  * binder_free_txn_fixups() - free unprocessed fd fixups
1506  * @t:  binder transaction whose fd fixups are to be freed
1507  *
1508  * If the transaction is being torn down prior to being
1509  * processed by the target process, free all of the
1510  * fd fixups and fput the file structs. It is safe to
1511  * call this function after the fixups have been
1512  * processed -- in that case, the list will be empty.
1513  */
1514 static void binder_free_txn_fixups(struct binder_transaction *t)
1515 {
1516         struct binder_txn_fd_fixup *fixup, *tmp;
1517
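        /* the _safe iterator is required: each entry is freed inside the loop */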
1518         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
1519                 fput(fixup->file);
1520                 list_del(&fixup->fixup_entry);
1521                 kfree(fixup);
1522         }
1523 }
1524
1525 static void binder_txn_latency_free(struct binder_transaction *t)
1526 {
1527         int from_proc, from_thread, to_proc, to_thread;
1528
1529         spin_lock(&t->lock);
1530         from_proc = t->from ? t->from->proc->pid : 0;
1531         from_thread = t->from ? t->from->pid : 0;
1532         to_proc = t->to_proc ? t->to_proc->pid : 0;
1533         to_thread = t->to_thread ? t->to_thread->pid : 0;
1534         spin_unlock(&t->lock);
1535
1536         trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread);
1537 }
1538
1539 static void binder_free_transaction(struct binder_transaction *t)
1540 {
1541         struct binder_proc *target_proc = t->to_proc;
1542
1543         if (target_proc) {
1544                 binder_inner_proc_lock(target_proc);
1545                 target_proc->outstanding_txns--;
1546                 if (target_proc->outstanding_txns < 0)
1547                         pr_warn("%s: Unexpected outstanding_txns %d\n",
1548                                 __func__, target_proc->outstanding_txns);
1549                 if (!target_proc->outstanding_txns && target_proc->is_frozen)
1550                         wake_up_interruptible_all(&target_proc->freeze_wait);
1551                 if (t->buffer)
1552                         t->buffer->transaction = NULL;
1553                 binder_inner_proc_unlock(target_proc);
1554         }
1555         if (trace_binder_txn_latency_free_enabled())
1556                 binder_txn_latency_free(t);
1557         /*
1558          * If the transaction has no target_proc, then
1559          * t->buffer->transaction has already been cleared.
1560          */
1561         binder_free_txn_fixups(t);
1562         kfree(t);
1563         binder_stats_deleted(BINDER_STAT_TRANSACTION);
1564 }
1565
1566 static void binder_send_failed_reply(struct binder_transaction *t,
1567                                      uint32_t error_code)
1568 {
1569         struct binder_thread *target_thread;
1570         struct binder_transaction *next;
1571
1572         BUG_ON(t->flags & TF_ONE_WAY);
1573         while (1) {
1574                 target_thread = binder_get_txn_from_and_acq_inner(t);
1575                 if (target_thread) {
1576                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1577                                      "send failed reply for transaction %d to %d:%d\n",
1578                                       t->debug_id,
1579                                       target_thread->proc->pid,
1580                                       target_thread->pid);
1581
1582                         binder_pop_transaction_ilocked(target_thread, t);
1583                         if (target_thread->reply_error.cmd == BR_OK) {
1584                                 target_thread->reply_error.cmd = error_code;
1585                                 binder_enqueue_thread_work_ilocked(
1586                                         target_thread,
1587                                         &target_thread->reply_error.work);
1588                                 wake_up_interruptible(&target_thread->wait);
1589                         } else {
1590                                 /*
1591                                  * Cannot get here for normal operation, but
1592                                  * we can if multiple synchronous transactions
1593                                  * are sent without blocking for responses.
1594                                  * Just ignore the 2nd error in this case.
1595                                  */
1596                                 pr_warn("Unexpected reply error: %u\n",
1597                                         target_thread->reply_error.cmd);
1598                         }
1599                         binder_inner_proc_unlock(target_thread->proc);
1600                         binder_thread_dec_tmpref(target_thread);
1601                         binder_free_transaction(t);
1602                         return;
1603                 }
1604                 __release(&target_thread->proc->inner_lock);
1605                 next = t->from_parent;
1606
1607                 binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
1608                              "send failed reply for transaction %d, target dead\n",
1609                              t->debug_id);
1610
1611                 binder_free_transaction(t);
1612                 if (next == NULL) {
1613                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
1614                                      "reply failed, no target thread at root\n");
1615                         return;
1616                 }
1617                 t = next;
1618                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
1619                              "reply failed, no target thread -- retry %d\n",
1620                               t->debug_id);
1621         }
1622 }
1623
1624 /**
1625  * binder_cleanup_transaction() - cleans up undelivered transaction
1626  * @t:          transaction that needs to be cleaned up
1627  * @reason:     reason the transaction wasn't delivered
1628  * @error_code: error to return to caller (if synchronous call)
1629  */
1630 static void binder_cleanup_transaction(struct binder_transaction *t,
1631                                        const char *reason,
1632                                        uint32_t error_code)
1633 {
1634         if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) {
1635                 binder_send_failed_reply(t, error_code);
1636         } else {
1637                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
1638                         "undelivered transaction %d, %s\n",
1639                         t->debug_id, reason);
1640                 binder_free_transaction(t);
1641         }
1642 }
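/*
 * Illustrative call site (sketch): this is how undelivered work can be
 * released when a process dies; the reason string is an example.
 *
 *	binder_cleanup_transaction(t, "process died.", BR_DEAD_REPLY);
 */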
1643
1644 /**
1645  * binder_get_object() - gets object and checks for valid metadata
1646  * @proc:       binder_proc owning the buffer
1647  * @u:          sender's user pointer to base of buffer
1648  * @buffer:     binder_buffer that we're parsing.
1649  * @offset:     offset in the @buffer at which to validate an object.
1650  * @object:     struct binder_object to read into
1651  *
1652  * Copy the binder object at the given offset into @object. If @u is
1653  * provided then the copy is from the sender's buffer. If not, then
1654  * it is copied from the target's @buffer.
1655  *
1656  * Return:      If there's a valid metadata object at @offset, the
1657  *              size of that object. Otherwise, it returns zero. The object
1658  *              is read into the struct binder_object pointed to by @object.
1659  */
1660 static size_t binder_get_object(struct binder_proc *proc,
1661                                 const void __user *u,
1662                                 struct binder_buffer *buffer,
1663                                 unsigned long offset,
1664                                 struct binder_object *object)
1665 {
1666         size_t read_size;
1667         struct binder_object_header *hdr;
1668         size_t object_size = 0;
1669
1670         read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset);
1671         if (offset > buffer->data_size || read_size < sizeof(*hdr))
1672                 return 0;
1673         if (u) {
1674                 if (copy_from_user(object, u + offset, read_size))
1675                         return 0;
1676         } else {
1677                 if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer,
1678                                                   offset, read_size))
1679                         return 0;
1680         }
1681
1682         /* Ok, now see if we read a complete object. */
1683         hdr = &object->hdr;
1684         switch (hdr->type) {
1685         case BINDER_TYPE_BINDER:
1686         case BINDER_TYPE_WEAK_BINDER:
1687         case BINDER_TYPE_HANDLE:
1688         case BINDER_TYPE_WEAK_HANDLE:
1689                 object_size = sizeof(struct flat_binder_object);
1690                 break;
1691         case BINDER_TYPE_FD:
1692                 object_size = sizeof(struct binder_fd_object);
1693                 break;
1694         case BINDER_TYPE_PTR:
1695                 object_size = sizeof(struct binder_buffer_object);
1696                 break;
1697         case BINDER_TYPE_FDA:
1698                 object_size = sizeof(struct binder_fd_array_object);
1699                 break;
1700         default:
1701                 return 0;
1702         }
1703         if (offset <= buffer->data_size - object_size &&
1704             buffer->data_size >= object_size)
1705                 return object_size;
1706         else
1707                 return 0;
1708 }
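/*
 * Illustrative caller pattern (sketch): a zero return means there is
 * no valid object at the offset and parsing must stop; the error
 * handling below is hypothetical.
 *
 *	object_size = binder_get_object(proc, NULL, b, object_offset, &object);
 *	if (!object_size)
 *		return -EINVAL;
 */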
1709
1710 /**
1711  * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer.
1712  * @proc:       binder_proc owning the buffer
1713  * @b:          binder_buffer containing the object
1714  * @object:     struct binder_object to read into
1715  * @index:      index in offset array at which the binder_buffer_object is
1716  *              located
1717  * @start_offset: points to the start of the offset array
1718  * @object_offsetp: offset of @object read from @b
1719  * @num_valid:  the number of valid offsets in the offset array
1720  *
1721  * Return:      If @index is within the valid range of the offset array
1722  *              described by @start_offset and @num_valid, and if there's a valid
1723  *              binder_buffer_object at the offset found in index @index
1724  *              of the offset array, that object is returned. Otherwise,
1725  *              %NULL is returned.
1726  *              Note that the offset found in index @index itself is not
1727  *              verified; this function assumes that @num_valid elements
1728  * from @start_offset were previously verified to have valid offsets.
1729  *              If @object_offsetp is non-NULL, then the offset within
1730  *              @b is written to it.
1731  */
1732 static struct binder_buffer_object *binder_validate_ptr(
1733                                                 struct binder_proc *proc,
1734                                                 struct binder_buffer *b,
1735                                                 struct binder_object *object,
1736                                                 binder_size_t index,
1737                                                 binder_size_t start_offset,
1738                                                 binder_size_t *object_offsetp,
1739                                                 binder_size_t num_valid)
1740 {
1741         size_t object_size;
1742         binder_size_t object_offset;
1743         unsigned long buffer_offset;
1744
1745         if (index >= num_valid)
1746                 return NULL;
1747
1748         buffer_offset = start_offset + sizeof(binder_size_t) * index;
1749         if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1750                                           b, buffer_offset,
1751                                           sizeof(object_offset)))
1752                 return NULL;
1753         object_size = binder_get_object(proc, NULL, b, object_offset, object);
1754         if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
1755                 return NULL;
1756         if (object_offsetp)
1757                 *object_offsetp = object_offset;
1758
1759         return &object->bbo;
1760 }
1761
1762 /**
1763  * binder_validate_fixup() - validates pointer/fd fixups happen in order.
1764  * @proc:               binder_proc owning the buffer
1765  * @b:                  transaction buffer
1766  * @objects_start_offset: offset to start of objects buffer
1767  * @buffer_obj_offset:  offset to binder_buffer_object in which to fix up
1768  * @fixup_offset:       start offset in @buffer to fix up
1769  * @last_obj_offset:    offset to last binder_buffer_object that we fixed
1770  * @last_min_offset:    minimum fixup offset in object at @last_obj_offset
1771  *
1772  * Return:              %true if a fixup in buffer @b at offset
1773  *                      @fixup_offset is allowed.
1774  *
1775  * For safety reasons, we only allow fixups inside a buffer to happen
1776  * at increasing offsets; additionally, we only allow fixup on the last
1777  * buffer object that was verified, or one of its parents.
1778  *
1779  * Example of what is allowed:
1780  *
1781  * A
1782  *   B (parent = A, offset = 0)
1783  *   C (parent = A, offset = 16)
1784  *     D (parent = C, offset = 0)
1785  *   E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
1786  *
1787  * Examples of what is not allowed:
1788  *
1789  * Decreasing offsets within the same parent:
1790  * A
1791  *   C (parent = A, offset = 16)
1792  *   B (parent = A, offset = 0) // decreasing offset within A
1793  *
1794  * Referring to a parent that wasn't the last object or any of its parents:
1795  * A
1796  *   B (parent = A, offset = 0)
1797  *   C (parent = A, offset = 16)
1798  *     D (parent = B, offset = 0) // B is not the last object (C)
1799  *                                // or any of C's parents
1800  */
1801 static bool binder_validate_fixup(struct binder_proc *proc,
1802                                   struct binder_buffer *b,
1803                                   binder_size_t objects_start_offset,
1804                                   binder_size_t buffer_obj_offset,
1805                                   binder_size_t fixup_offset,
1806                                   binder_size_t last_obj_offset,
1807                                   binder_size_t last_min_offset)
1808 {
1809         if (!last_obj_offset) {
1810                 /* No previous object: nothing to fix up against */
1811                 return false;
1812         }
1813
1814         while (last_obj_offset != buffer_obj_offset) {
1815                 unsigned long buffer_offset;
1816                 struct binder_object last_object;
1817                 struct binder_buffer_object *last_bbo;
1818                 size_t object_size = binder_get_object(proc, NULL, b,
1819                                                        last_obj_offset,
1820                                                        &last_object);
1821                 if (object_size != sizeof(*last_bbo))
1822                         return false;
1823
1824                 last_bbo = &last_object.bbo;
1825                 /*
1826                  * Safe to retrieve the parent of last_obj, since it
1827                  * was previously verified by the driver.
1828                  */
1829                 if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0)
1830                         return false;
1831                 last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t);
1832                 buffer_offset = objects_start_offset +
1833                         sizeof(binder_size_t) * last_bbo->parent;
1834                 if (binder_alloc_copy_from_buffer(&proc->alloc,
1835                                                   &last_obj_offset,
1836                                                   b, buffer_offset,
1837                                                   sizeof(last_obj_offset)))
1838                         return false;
1839         }
1840         return (fixup_offset >= last_min_offset);
1841 }
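/*
 * Worked example (sketch) for the "allowed" case above, assuming the
 * last verified object was D: to accept E's fixup into A the loop
 * climbs D -> C -> A, and the final step sets last_min_offset to
 * C.parent_offset + sizeof(uintptr_t) = 24 on a 64-bit kernel, so
 * E's fixup_offset of 32 satisfies the final check and is allowed.
 */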
1842
1843 /**
1844  * struct binder_task_work_cb - for deferred close
1845  *
1846  * @twork:                callback_head for task work
1847  * @file:                 file to close
1848  *
1849  * Structure to pass task work to be handled after
1850  * returning from binder_ioctl() via task_work_add().
1851  */
1852 struct binder_task_work_cb {
1853         struct callback_head twork;
1854         struct file *file;
1855 };
1856
1857 /**
1858  * binder_do_fd_close() - finish a deferred fd close
1859  * @twork:      callback head for task work
1860  *
1861  * It is not safe to call ksys_close() during the binder_ioctl()
1862  * function if there is a chance that binder's own file descriptor
1863  * might be closed. This is to meet the requirements for using
1864  * fdget() (see comments for __fget_light()). Therefore use
1865  * task_work_add() to schedule the close operation once we have
1866  * returned from binder_ioctl(). This function is a callback
1867  * for that mechanism and drops the final reference on the
1868  * given file with fput().
1869  */
1870 static void binder_do_fd_close(struct callback_head *twork)
1871 {
1872         struct binder_task_work_cb *twcb = container_of(twork,
1873                         struct binder_task_work_cb, twork);
1874
1875         fput(twcb->file);
1876         kfree(twcb);
1877 }
1878
1879 /**
1880  * binder_deferred_fd_close() - schedule a close for the given file-descriptor
1881  * @fd:         file-descriptor to close
1882  *
1883  * See comments in binder_do_fd_close(). This function is used to schedule
1884  * a file-descriptor to be closed after returning from binder_ioctl().
1885  */
1886 static void binder_deferred_fd_close(int fd)
1887 {
1888         struct binder_task_work_cb *twcb;
1889
1890         twcb = kzalloc(sizeof(*twcb), GFP_KERNEL);
1891         if (!twcb)
1892                 return;
1893         init_task_work(&twcb->twork, binder_do_fd_close);
1894         close_fd_get_file(fd, &twcb->file);
1895         if (twcb->file) {
1896                 filp_close(twcb->file, current->files);
1897                 task_work_add(current, &twcb->twork, TWA_RESUME);
1898         } else {
1899                 kfree(twcb);
1900         }
1901 }
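/*
 * Illustrative usage (sketch): code running in binder_ioctl() context
 * must not close an fd directly; it defers the close so the final
 * fput() happens after returning to user space.
 *
 *	binder_deferred_fd_close(fd);	// instead of close_fd(fd)
 */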
1902
1903 static void binder_transaction_buffer_release(struct binder_proc *proc,
1904                                               struct binder_thread *thread,
1905                                               struct binder_buffer *buffer,
1906                                               binder_size_t failed_at,
1907                                               bool is_failure)
1908 {
1909         int debug_id = buffer->debug_id;
1910         binder_size_t off_start_offset, buffer_offset, off_end_offset;
1911
1912         binder_debug(BINDER_DEBUG_TRANSACTION,
1913                      "%d buffer release %d, size %zd-%zd, failed at %llx\n",
1914                      proc->pid, buffer->debug_id,
1915                      buffer->data_size, buffer->offsets_size,
1916                      (unsigned long long)failed_at);
1917
1918         if (buffer->target_node)
1919                 binder_dec_node(buffer->target_node, 1, 0);
1920
1921         off_start_offset = ALIGN(buffer->data_size, sizeof(void *));
1922         off_end_offset = is_failure && failed_at ? failed_at :
1923                                 off_start_offset + buffer->offsets_size;
1924         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
1925              buffer_offset += sizeof(binder_size_t)) {
1926                 struct binder_object_header *hdr;
1927                 size_t object_size = 0;
1928                 struct binder_object object;
1929                 binder_size_t object_offset;
1930
1931                 if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset,
1932                                                    buffer, buffer_offset,
1933                                                    sizeof(object_offset)))
1934                         object_size = binder_get_object(proc, NULL, buffer,
1935                                                         object_offset, &object);
1936                 if (object_size == 0) {
1937                         pr_err("transaction release %d bad object at offset %lld, size %zd\n",
1938                                debug_id, (u64)object_offset, buffer->data_size);
1939                         continue;
1940                 }
1941                 hdr = &object.hdr;
1942                 switch (hdr->type) {
1943                 case BINDER_TYPE_BINDER:
1944                 case BINDER_TYPE_WEAK_BINDER: {
1945                         struct flat_binder_object *fp;
1946                         struct binder_node *node;
1947
1948                         fp = to_flat_binder_object(hdr);
1949                         node = binder_get_node(proc, fp->binder);
1950                         if (node == NULL) {
1951                                 pr_err("transaction release %d bad node %016llx\n",
1952                                        debug_id, (u64)fp->binder);
1953                                 break;
1954                         }
1955                         binder_debug(BINDER_DEBUG_TRANSACTION,
1956                                      "        node %d u%016llx\n",
1957                                      node->debug_id, (u64)node->ptr);
1958                         binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER,
1959                                         0);
1960                         binder_put_node(node);
1961                 } break;
1962                 case BINDER_TYPE_HANDLE:
1963                 case BINDER_TYPE_WEAK_HANDLE: {
1964                         struct flat_binder_object *fp;
1965                         struct binder_ref_data rdata;
1966                         int ret;
1967
1968                         fp = to_flat_binder_object(hdr);
1969                         ret = binder_dec_ref_for_handle(proc, fp->handle,
1970                                 hdr->type == BINDER_TYPE_HANDLE, &rdata);
1971
1972                         if (ret) {
1973                                 pr_err("transaction release %d bad handle %d, ret = %d\n",
1974                                  debug_id, fp->handle, ret);
1975                                 break;
1976                         }
1977                         binder_debug(BINDER_DEBUG_TRANSACTION,
1978                                      "        ref %d desc %d\n",
1979                                      rdata.debug_id, rdata.desc);
1980                 } break;
1981
1982                 case BINDER_TYPE_FD: {
1983                         /*
1984                          * No need to close the file here since user-space
1985                          * closes it for successfully delivered
1986                          * transactions. For transactions that weren't
1987                          * delivered, the new fd was never allocated so
1988                          * there is no need to close and the fput on the
1989                          * file is done when the transaction is torn
1990                          * down.
1991                          */
1992                 } break;
1993                 case BINDER_TYPE_PTR:
1994                         /*
1995                          * Nothing to do here, this will get cleaned up when the
1996                          * transaction buffer gets freed
1997                          */
1998                         break;
1999                 case BINDER_TYPE_FDA: {
2000                         struct binder_fd_array_object *fda;
2001                         struct binder_buffer_object *parent;
2002                         struct binder_object ptr_object;
2003                         binder_size_t fda_offset;
2004                         size_t fd_index;
2005                         binder_size_t fd_buf_size;
2006                         binder_size_t num_valid;
2007
2008                         if (is_failure) {
2009                                 /*
2010                                  * The fd fixups have not been applied so no
2011                                  * fds need to be closed.
2012                                  */
2013                                 continue;
2014                         }
2015
2016                         num_valid = (buffer_offset - off_start_offset) /
2017                                                 sizeof(binder_size_t);
2018                         fda = to_binder_fd_array_object(hdr);
2019                         parent = binder_validate_ptr(proc, buffer, &ptr_object,
2020                                                      fda->parent,
2021                                                      off_start_offset,
2022                                                      NULL,
2023                                                      num_valid);
2024                         if (!parent) {
2025                                 pr_err("transaction release %d bad parent offset\n",
2026                                        debug_id);
2027                                 continue;
2028                         }
2029                         fd_buf_size = sizeof(u32) * fda->num_fds;
2030                         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2031                                 pr_err("transaction release %d invalid number of fds (%lld)\n",
2032                                        debug_id, (u64)fda->num_fds);
2033                                 continue;
2034                         }
2035                         if (fd_buf_size > parent->length ||
2036                             fda->parent_offset > parent->length - fd_buf_size) {
2037                                 /* No space for all file descriptors here. */
2038                                 pr_err("transaction release %d not enough space for %lld fds in buffer\n",
2039                                        debug_id, (u64)fda->num_fds);
2040                                 continue;
2041                         }
2042                         /*
2043                          * the source data for binder_buffer_object is visible
2044                          * to user-space and the @buffer element is the user
2045                          * pointer to the buffer_object containing the fd_array.
2046                          * Convert the address to an offset relative to
2047                          * the base of the transaction buffer.
2048                          */
2049                         fda_offset =
2050                             (parent->buffer - (uintptr_t)buffer->user_data) +
2051                             fda->parent_offset;
2052                         for (fd_index = 0; fd_index < fda->num_fds;
2053                              fd_index++) {
2054                                 u32 fd;
2055                                 int err;
2056                                 binder_size_t offset = fda_offset +
2057                                         fd_index * sizeof(fd);
2058
2059                                 err = binder_alloc_copy_from_buffer(
2060                                                 &proc->alloc, &fd, buffer,
2061                                                 offset, sizeof(fd));
2062                                 WARN_ON(err);
2063                                 if (!err) {
2064                                         binder_deferred_fd_close(fd);
2065                                         /*
2066                                          * Need to make sure the thread goes
2067                                          * back to userspace to complete the
2068                                          * deferred close
2069                                          */
2070                                         if (thread)
2071                                                 thread->looper_need_return = true;
2072                                 }
2073                         }
2074                 } break;
2075                 default:
2076                         pr_err("transaction release %d bad object type %x\n",
2077                                 debug_id, hdr->type);
2078                         break;
2079                 }
2080         }
2081 }
2082
2083 static int binder_translate_binder(struct flat_binder_object *fp,
2084                                    struct binder_transaction *t,
2085                                    struct binder_thread *thread)
2086 {
2087         struct binder_node *node;
2088         struct binder_proc *proc = thread->proc;
2089         struct binder_proc *target_proc = t->to_proc;
2090         struct binder_ref_data rdata;
2091         int ret = 0;
2092
2093         node = binder_get_node(proc, fp->binder);
2094         if (!node) {
2095                 node = binder_new_node(proc, fp);
2096                 if (!node)
2097                         return -ENOMEM;
2098         }
2099         if (fp->cookie != node->cookie) {
2100                 binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n",
2101                                   proc->pid, thread->pid, (u64)fp->binder,
2102                                   node->debug_id, (u64)fp->cookie,
2103                                   (u64)node->cookie);
2104                 ret = -EINVAL;
2105                 goto done;
2106         }
2107         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2108                 ret = -EPERM;
2109                 goto done;
2110         }
2111
2112         ret = binder_inc_ref_for_node(target_proc, node,
2113                         fp->hdr.type == BINDER_TYPE_BINDER,
2114                         &thread->todo, &rdata);
2115         if (ret)
2116                 goto done;
2117
2118         if (fp->hdr.type == BINDER_TYPE_BINDER)
2119                 fp->hdr.type = BINDER_TYPE_HANDLE;
2120         else
2121                 fp->hdr.type = BINDER_TYPE_WEAK_HANDLE;
2122         fp->binder = 0;
2123         fp->handle = rdata.desc;
2124         fp->cookie = 0;
2125
2126         trace_binder_transaction_node_to_ref(t, node, &rdata);
2127         binder_debug(BINDER_DEBUG_TRANSACTION,
2128                      "        node %d u%016llx -> ref %d desc %d\n",
2129                      node->debug_id, (u64)node->ptr,
2130                      rdata.debug_id, rdata.desc);
2131 done:
2132         binder_put_node(node);
2133         return ret;
2134 }
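/*
 * Illustrative before/after (sketch) of the in-place rewrite performed
 * above for a strong binder, where "desc" is the descriptor chosen by
 * binder_inc_ref_for_node() for the target process:
 *
 *	before: { .hdr.type = BINDER_TYPE_BINDER, .binder = ptr, .cookie = c }
 *	after:  { .hdr.type = BINDER_TYPE_HANDLE, .handle = desc, .cookie = 0 }
 */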
2135
2136 static int binder_translate_handle(struct flat_binder_object *fp,
2137                                    struct binder_transaction *t,
2138                                    struct binder_thread *thread)
2139 {
2140         struct binder_proc *proc = thread->proc;
2141         struct binder_proc *target_proc = t->to_proc;
2142         struct binder_node *node;
2143         struct binder_ref_data src_rdata;
2144         int ret = 0;
2145
2146         node = binder_get_node_from_ref(proc, fp->handle,
2147                         fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata);
2148         if (!node) {
2149                 binder_user_error("%d:%d got transaction with invalid handle, %d\n",
2150                                   proc->pid, thread->pid, fp->handle);
2151                 return -EINVAL;
2152         }
2153         if (security_binder_transfer_binder(proc->cred, target_proc->cred)) {
2154                 ret = -EPERM;
2155                 goto done;
2156         }
2157
2158         binder_node_lock(node);
2159         if (node->proc == target_proc) {
2160                 if (fp->hdr.type == BINDER_TYPE_HANDLE)
2161                         fp->hdr.type = BINDER_TYPE_BINDER;
2162                 else
2163                         fp->hdr.type = BINDER_TYPE_WEAK_BINDER;
2164                 fp->binder = node->ptr;
2165                 fp->cookie = node->cookie;
2166                 if (node->proc)
2167                         binder_inner_proc_lock(node->proc);
2168                 else
2169                         __acquire(&node->proc->inner_lock);
2170                 binder_inc_node_nilocked(node,
2171                                          fp->hdr.type == BINDER_TYPE_BINDER,
2172                                          0, NULL);
2173                 if (node->proc)
2174                         binder_inner_proc_unlock(node->proc);
2175                 else
2176                         __release(&node->proc->inner_lock);
2177                 trace_binder_transaction_ref_to_node(t, node, &src_rdata);
2178                 binder_debug(BINDER_DEBUG_TRANSACTION,
2179                              "        ref %d desc %d -> node %d u%016llx\n",
2180                              src_rdata.debug_id, src_rdata.desc, node->debug_id,
2181                              (u64)node->ptr);
2182                 binder_node_unlock(node);
2183         } else {
2184                 struct binder_ref_data dest_rdata;
2185
2186                 binder_node_unlock(node);
2187                 ret = binder_inc_ref_for_node(target_proc, node,
2188                                 fp->hdr.type == BINDER_TYPE_HANDLE,
2189                                 NULL, &dest_rdata);
2190                 if (ret)
2191                         goto done;
2192
2193                 fp->binder = 0;
2194                 fp->handle = dest_rdata.desc;
2195                 fp->cookie = 0;
2196                 trace_binder_transaction_ref_to_ref(t, node, &src_rdata,
2197                                                     &dest_rdata);
2198                 binder_debug(BINDER_DEBUG_TRANSACTION,
2199                              "        ref %d desc %d -> ref %d desc %d (node %d)\n",
2200                              src_rdata.debug_id, src_rdata.desc,
2201                              dest_rdata.debug_id, dest_rdata.desc,
2202                              node->debug_id);
2203         }
2204 done:
2205         binder_put_node(node);
2206         return ret;
2207 }
2208
2209 static int binder_translate_fd(u32 fd, binder_size_t fd_offset,
2210                                struct binder_transaction *t,
2211                                struct binder_thread *thread,
2212                                struct binder_transaction *in_reply_to)
2213 {
2214         struct binder_proc *proc = thread->proc;
2215         struct binder_proc *target_proc = t->to_proc;
2216         struct binder_txn_fd_fixup *fixup;
2217         struct file *file;
2218         int ret = 0;
2219         bool target_allows_fd;
2220
2221         if (in_reply_to)
2222                 target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS);
2223         else
2224                 target_allows_fd = t->buffer->target_node->accept_fds;
2225         if (!target_allows_fd) {
2226                 binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n",
2227                                   proc->pid, thread->pid,
2228                                   in_reply_to ? "reply" : "transaction",
2229                                   fd);
2230                 ret = -EPERM;
2231                 goto err_fd_not_accepted;
2232         }
2233
2234         file = fget(fd);
2235         if (!file) {
2236                 binder_user_error("%d:%d got transaction with invalid fd, %d\n",
2237                                   proc->pid, thread->pid, fd);
2238                 ret = -EBADF;
2239                 goto err_fget;
2240         }
2241         ret = security_binder_transfer_file(proc->cred, target_proc->cred, file);
2242         if (ret < 0) {
2243                 ret = -EPERM;
2244                 goto err_security;
2245         }
2246
2247         /*
2248          * Add fixup record for this transaction. The allocation
2249          * of the fd in the target needs to be done from a
2250          * target thread.
2251          */
2252         fixup = kzalloc(sizeof(*fixup), GFP_KERNEL);
2253         if (!fixup) {
2254                 ret = -ENOMEM;
2255                 goto err_alloc;
2256         }
2257         fixup->file = file;
2258         fixup->offset = fd_offset;
2259         trace_binder_transaction_fd_send(t, fd, fixup->offset);
2260         list_add_tail(&fixup->fixup_entry, &t->fd_fixups);
2261
2262         return ret;
2263
2264 err_alloc:
2265 err_security:
2266         fput(file);
2267 err_fget:
2268 err_fd_not_accepted:
2269         return ret;
2270 }
2271
2272 static int binder_translate_fd_array(struct binder_fd_array_object *fda,
2273                                      struct binder_buffer_object *parent,
2274                                      struct binder_transaction *t,
2275                                      struct binder_thread *thread,
2276                                      struct binder_transaction *in_reply_to)
2277 {
2278         binder_size_t fdi, fd_buf_size;
2279         binder_size_t fda_offset;
2280         struct binder_proc *proc = thread->proc;
2281         struct binder_proc *target_proc = t->to_proc;
2282
2283         fd_buf_size = sizeof(u32) * fda->num_fds;
2284         if (fda->num_fds >= SIZE_MAX / sizeof(u32)) {
2285                 binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n",
2286                                   proc->pid, thread->pid, (u64)fda->num_fds);
2287                 return -EINVAL;
2288         }
2289         if (fd_buf_size > parent->length ||
2290             fda->parent_offset > parent->length - fd_buf_size) {
2291                 /* No space for all file descriptors here. */
2292                 binder_user_error("%d:%d not enough space to store %lld fds in buffer\n",
2293                                   proc->pid, thread->pid, (u64)fda->num_fds);
2294                 return -EINVAL;
2295         }
2296         /*
2297          * the source data for binder_buffer_object is visible
2298          * to user-space and the @buffer element is the user
2299          * pointer to the buffer_object containing the fd_array.
2300          * Convert the address to an offset relative to
2301          * the base of the transaction buffer.
2302          */
2303         fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) +
2304                 fda->parent_offset;
2305         if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32))) {
2306                 binder_user_error("%d:%d parent offset not aligned correctly.\n",
2307                                   proc->pid, thread->pid);
2308                 return -EINVAL;
2309         }
2310         for (fdi = 0; fdi < fda->num_fds; fdi++) {
2311                 u32 fd;
2312                 int ret;
2313                 binder_size_t offset = fda_offset + fdi * sizeof(fd);
2314
2315                 ret = binder_alloc_copy_from_buffer(&target_proc->alloc,
2316                                                     &fd, t->buffer,
2317                                                     offset, sizeof(fd));
2318                 if (!ret)
2319                         ret = binder_translate_fd(fd, offset, t, thread,
2320                                                   in_reply_to);
2321                 if (ret)
2322                         return ret > 0 ? -EINVAL : ret;
2323         }
2324         return 0;
2325 }
2326
2327 static int binder_fixup_parent(struct binder_transaction *t,
2328                                struct binder_thread *thread,
2329                                struct binder_buffer_object *bp,
2330                                binder_size_t off_start_offset,
2331                                binder_size_t num_valid,
2332                                binder_size_t last_fixup_obj_off,
2333                                binder_size_t last_fixup_min_off)
2334 {
2335         struct binder_buffer_object *parent;
2336         struct binder_buffer *b = t->buffer;
2337         struct binder_proc *proc = thread->proc;
2338         struct binder_proc *target_proc = t->to_proc;
2339         struct binder_object object;
2340         binder_size_t buffer_offset;
2341         binder_size_t parent_offset;
2342
2343         if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
2344                 return 0;
2345
2346         parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
2347                                      off_start_offset, &parent_offset,
2348                                      num_valid);
2349         if (!parent) {
2350                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
2351                                   proc->pid, thread->pid);
2352                 return -EINVAL;
2353         }
2354
2355         if (!binder_validate_fixup(target_proc, b, off_start_offset,
2356                                    parent_offset, bp->parent_offset,
2357                                    last_fixup_obj_off,
2358                                    last_fixup_min_off)) {
2359                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
2360                                   proc->pid, thread->pid);
2361                 return -EINVAL;
2362         }
2363
2364         if (parent->length < sizeof(binder_uintptr_t) ||
2365             bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
2366                 /* No space for a pointer here! */
2367                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2368                                   proc->pid, thread->pid);
2369                 return -EINVAL;
2370         }
2371         buffer_offset = bp->parent_offset +
2372                         (uintptr_t)parent->buffer - (uintptr_t)b->user_data;
2373         if (binder_alloc_copy_to_buffer(&target_proc->alloc, b, buffer_offset,
2374                                         &bp->buffer, sizeof(bp->buffer))) {
2375                 binder_user_error("%d:%d got transaction with invalid parent offset\n",
2376                                   proc->pid, thread->pid);
2377                 return -EINVAL;
2378         }
2379
2380         return 0;
2381 }
2382
2383 /**
2384  * binder_proc_transaction() - sends a transaction to a process and wakes it up
2385  * @t:          transaction to send
2386  * @proc:       process to send the transaction to
2387  * @thread:     thread in @proc to send the transaction to (may be NULL)
2388  *
2389  * This function queues a transaction to the specified process. It will try
2390  * to find a thread in the target process to handle the transaction and
2391  * wake it up. If no thread is found, the work is queued to the proc
2392  * waitqueue.
2393  *
2394  * If the @thread parameter is not NULL, the transaction is always queued
2395  * to the waitlist of that specific thread.
2396  *
2397  * Return:      0 if the transaction was successfully queued
2398  *              BR_DEAD_REPLY if the target process or thread is dead
2399  *              BR_FROZEN_REPLY if the target process or thread is frozen
2400  */
2401 static int binder_proc_transaction(struct binder_transaction *t,
2402                                     struct binder_proc *proc,
2403                                     struct binder_thread *thread)
2404 {
2405         struct binder_node *node = t->buffer->target_node;
2406         bool oneway = !!(t->flags & TF_ONE_WAY);
2407         bool pending_async = false;
2408
2409         BUG_ON(!node);
2410         binder_node_lock(node);
2411         if (oneway) {
2412                 BUG_ON(thread);
2413                 if (node->has_async_transaction)
2414                         pending_async = true;
2415                 else
2416                         node->has_async_transaction = true;
2417         }
2418
2419         binder_inner_proc_lock(proc);
2420         if (proc->is_frozen) {
2421                 proc->sync_recv |= !oneway;
2422                 proc->async_recv |= oneway;
2423         }
2424
2425         if ((proc->is_frozen && !oneway) || proc->is_dead ||
2426                         (thread && thread->is_dead)) {
2427                 binder_inner_proc_unlock(proc);
2428                 binder_node_unlock(node);
2429                 return proc->is_frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
2430         }
2431
2432         if (!thread && !pending_async)
2433                 thread = binder_select_thread_ilocked(proc);
2434
2435         if (thread)
2436                 binder_enqueue_thread_work_ilocked(thread, &t->work);
2437         else if (!pending_async)
2438                 binder_enqueue_work_ilocked(&t->work, &proc->todo);
2439         else
2440                 binder_enqueue_work_ilocked(&t->work, &node->async_todo);
2441
2442         if (!pending_async)
2443                 binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);
2444
2445         proc->outstanding_txns++;
2446         binder_inner_proc_unlock(proc);
2447         binder_node_unlock(node);
2448
2449         return 0;
2450 }
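/*
 * Illustrative caller handling (sketch; variable and label names mirror
 * the transaction path) of the return codes documented above:
 *
 *	return_error = binder_proc_transaction(t, target_proc, NULL);
 *	if (return_error)	// BR_DEAD_REPLY or BR_FROZEN_REPLY
 *		goto err_dead_proc_or_thread;
 */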
2451
2452 /**
2453  * binder_get_node_refs_for_txn() - Get required refs on node for txn
2454  * @node:         struct binder_node for which to get refs
2455  * @procp:        returns @node->proc if valid
2456  * @error:        set to BR_DEAD_REPLY if @node->proc is NULL
2457  *
2458  * User-space normally keeps the node alive when creating a transaction
2459  * since it has a reference to the target. The local strong ref keeps it
2460  * alive if the sending process dies before the target process processes
2461  * the transaction. If the source process is malicious or has a reference
2462  * counting bug, relying on the local strong ref can fail.
2463  *
2464  * Since user-space can cause the local strong ref to go away, we also take
2465  * a tmpref on the node to ensure it survives while we are constructing
2466  * the transaction. We also need a tmpref on the proc while we are
2467  * constructing the transaction, so we take that here as well.
2468  *
2469  * Return: The target_node with refs taken, or NULL if @node->proc is NULL.
2470  * Also sets @procp if valid. If @node->proc is NULL, indicating that the
2471  * target proc has died, @error is set to BR_DEAD_REPLY.
2472  */
2473 static struct binder_node *binder_get_node_refs_for_txn(
2474                 struct binder_node *node,
2475                 struct binder_proc **procp,
2476                 uint32_t *error)
2477 {
2478         struct binder_node *target_node = NULL;
2479
2480         binder_node_inner_lock(node);
2481         if (node->proc) {
2482                 target_node = node;
2483                 binder_inc_node_nilocked(node, 1, 0, NULL);
2484                 binder_inc_node_tmpref_ilocked(node);
2485                 node->proc->tmp_ref++;
2486                 *procp = node->proc;
2487         } else
2488                 *error = BR_DEAD_REPLY;
2489         binder_node_inner_unlock(node);
2490
2491         return target_node;
2492 }
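/*
 * Illustrative release pairing (sketch): the refs taken above are
 * dropped once the transaction no longer needs the node, e.g. on an
 * error path:
 *
 *	binder_dec_node(target_node, 1, 0);
 *	binder_dec_node_tmpref(target_node);
 *	binder_proc_dec_tmpref(target_proc);
 */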
2493
2494 static void binder_transaction(struct binder_proc *proc,
2495                                struct binder_thread *thread,
2496                                struct binder_transaction_data *tr, int reply,
2497                                binder_size_t extra_buffers_size)
2498 {
2499         int ret;
2500         struct binder_transaction *t;
2501         struct binder_work *w;
2502         struct binder_work *tcomplete;
2503         binder_size_t buffer_offset = 0;
2504         binder_size_t off_start_offset, off_end_offset;
2505         binder_size_t off_min;
2506         binder_size_t sg_buf_offset, sg_buf_end_offset;
2507         binder_size_t user_offset = 0;
2508         struct binder_proc *target_proc = NULL;
2509         struct binder_thread *target_thread = NULL;
2510         struct binder_node *target_node = NULL;
2511         struct binder_transaction *in_reply_to = NULL;
2512         struct binder_transaction_log_entry *e;
2513         uint32_t return_error = 0;
2514         uint32_t return_error_param = 0;
2515         uint32_t return_error_line = 0;
2516         binder_size_t last_fixup_obj_off = 0;
2517         binder_size_t last_fixup_min_off = 0;
2518         struct binder_context *context = proc->context;
2519         int t_debug_id = atomic_inc_return(&binder_last_id);
2520         char *secctx = NULL;
2521         u32 secctx_sz = 0;
2522         const void __user *user_buffer = (const void __user *)
2523                                 (uintptr_t)tr->data.ptr.buffer;
2524
2525         e = binder_transaction_log_add(&binder_transaction_log);
2526         e->debug_id = t_debug_id;
2527         e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY);
2528         e->from_proc = proc->pid;
2529         e->from_thread = thread->pid;
2530         e->target_handle = tr->target.handle;
2531         e->data_size = tr->data_size;
2532         e->offsets_size = tr->offsets_size;
2533         strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME);
2534
2535         if (reply) {
2536                 binder_inner_proc_lock(proc);
2537                 in_reply_to = thread->transaction_stack;
2538                 if (in_reply_to == NULL) {
2539                         binder_inner_proc_unlock(proc);
2540                         binder_user_error("%d:%d got reply transaction with no transaction stack\n",
2541                                           proc->pid, thread->pid);
2542                         return_error = BR_FAILED_REPLY;
2543                         return_error_param = -EPROTO;
2544                         return_error_line = __LINE__;
2545                         goto err_empty_call_stack;
2546                 }
2547                 if (in_reply_to->to_thread != thread) {
2548                         spin_lock(&in_reply_to->lock);
2549                         binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n",
2550                                 proc->pid, thread->pid, in_reply_to->debug_id,
2551                                 in_reply_to->to_proc ?
2552                                 in_reply_to->to_proc->pid : 0,
2553                                 in_reply_to->to_thread ?
2554                                 in_reply_to->to_thread->pid : 0);
2555                         spin_unlock(&in_reply_to->lock);
2556                         binder_inner_proc_unlock(proc);
2557                         return_error = BR_FAILED_REPLY;
2558                         return_error_param = -EPROTO;
2559                         return_error_line = __LINE__;
2560                         in_reply_to = NULL;
2561                         goto err_bad_call_stack;
2562                 }
2563                 thread->transaction_stack = in_reply_to->to_parent;
2564                 binder_inner_proc_unlock(proc);
2565                 binder_set_nice(in_reply_to->saved_priority);
2566                 target_thread = binder_get_txn_from_and_acq_inner(in_reply_to);
2567                 if (target_thread == NULL) {
2568                         /* annotation for sparse */
2569                         __release(&target_thread->proc->inner_lock);
2570                         return_error = BR_DEAD_REPLY;
2571                         return_error_line = __LINE__;
2572                         goto err_dead_binder;
2573                 }
2574                 if (target_thread->transaction_stack != in_reply_to) {
2575                         binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n",
2576                                 proc->pid, thread->pid,
2577                                 target_thread->transaction_stack ?
2578                                 target_thread->transaction_stack->debug_id : 0,
2579                                 in_reply_to->debug_id);
2580                         binder_inner_proc_unlock(target_thread->proc);
2581                         return_error = BR_FAILED_REPLY;
2582                         return_error_param = -EPROTO;
2583                         return_error_line = __LINE__;
2584                         in_reply_to = NULL;
2585                         target_thread = NULL;
2586                         goto err_dead_binder;
2587                 }
2588                 target_proc = target_thread->proc;
2589                 target_proc->tmp_ref++;
2590                 binder_inner_proc_unlock(target_thread->proc);
2591         } else {
2592                 if (tr->target.handle) {
2593                         struct binder_ref *ref;
2594
2595                         /*
2596                          * There must already be a strong ref
2597                          * on this node. If so, do a strong
2598                          * increment on the node to ensure it
2599                          * stays alive until the transaction is
2600                          * done.
2601                          */
2602                         binder_proc_lock(proc);
2603                         ref = binder_get_ref_olocked(proc, tr->target.handle,
2604                                                      true);
2605                         if (ref) {
2606                                 target_node = binder_get_node_refs_for_txn(
2607                                                 ref->node, &target_proc,
2608                                                 &return_error);
2609                         } else {
2610                                 binder_user_error("%d:%d got transaction to invalid handle, %u\n",
2611                                                   proc->pid, thread->pid, tr->target.handle);
2612                                 return_error = BR_FAILED_REPLY;
2613                         }
2614                         binder_proc_unlock(proc);
2615                 } else {
2616                         mutex_lock(&context->context_mgr_node_lock);
2617                         target_node = context->binder_context_mgr_node;
2618                         if (target_node)
2619                                 target_node = binder_get_node_refs_for_txn(
2620                                                 target_node, &target_proc,
2621                                                 &return_error);
2622                         else
2623                                 return_error = BR_DEAD_REPLY;
2624                         mutex_unlock(&context->context_mgr_node_lock);
2625                         if (target_node && target_proc->pid == proc->pid) {
2626                                 binder_user_error("%d:%d got transaction to context manager from process owning it\n",
2627                                                   proc->pid, thread->pid);
2628                                 return_error = BR_FAILED_REPLY;
2629                                 return_error_param = -EINVAL;
2630                                 return_error_line = __LINE__;
2631                                 goto err_invalid_target_handle;
2632                         }
2633                 }
2634                 if (!target_node) {
2635                         /*
2636                          * return_error is set above
2637                          */
2638                         return_error_param = -EINVAL;
2639                         return_error_line = __LINE__;
2640                         goto err_dead_binder;
2641                 }
2642                 e->to_node = target_node->debug_id;
2643                 if (WARN_ON(proc == target_proc)) {
2644                         return_error = BR_FAILED_REPLY;
2645                         return_error_param = -EINVAL;
2646                         return_error_line = __LINE__;
2647                         goto err_invalid_target_handle;
2648                 }
2649                 if (security_binder_transaction(proc->cred,
2650                                                 target_proc->cred) < 0) {
2651                         return_error = BR_FAILED_REPLY;
2652                         return_error_param = -EPERM;
2653                         return_error_line = __LINE__;
2654                         goto err_invalid_target_handle;
2655                 }
2656                 binder_inner_proc_lock(proc);
2657
2658                 w = list_first_entry_or_null(&thread->todo,
2659                                              struct binder_work, entry);
2660                 if (!(tr->flags & TF_ONE_WAY) && w &&
2661                     w->type == BINDER_WORK_TRANSACTION) {
2662                         /*
2663                          * Do not allow new outgoing transaction from a
2664                          * thread that has a transaction at the head of
2665                          * its todo list. Only need to check the head
2666                          * because binder_select_thread_ilocked picks a
2667                          * thread from proc->waiting_threads to enqueue
2668                          * the transaction, and nothing is queued to the
2669                          * todo list while the thread is on waiting_threads.
2670                          */
2671                         binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n",
2672                                           proc->pid, thread->pid);
2673                         binder_inner_proc_unlock(proc);
2674                         return_error = BR_FAILED_REPLY;
2675                         return_error_param = -EPROTO;
2676                         return_error_line = __LINE__;
2677                         goto err_bad_todo_list;
2678                 }
2679
2680                 if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) {
2681                         struct binder_transaction *tmp;
2682
2683                         tmp = thread->transaction_stack;
2684                         if (tmp->to_thread != thread) {
2685                                 spin_lock(&tmp->lock);
2686                                 binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n",
2687                                         proc->pid, thread->pid, tmp->debug_id,
2688                                         tmp->to_proc ? tmp->to_proc->pid : 0,
2689                                         tmp->to_thread ?
2690                                         tmp->to_thread->pid : 0);
2691                                 spin_unlock(&tmp->lock);
2692                                 binder_inner_proc_unlock(proc);
2693                                 return_error = BR_FAILED_REPLY;
2694                                 return_error_param = -EPROTO;
2695                                 return_error_line = __LINE__;
2696                                 goto err_bad_call_stack;
2697                         }
2698                         while (tmp) {
2699                                 struct binder_thread *from;
2700
2701                                 spin_lock(&tmp->lock);
2702                                 from = tmp->from;
2703                                 if (from && from->proc == target_proc) {
2704                                         atomic_inc(&from->tmp_ref);
2705                                         target_thread = from;
2706                                         spin_unlock(&tmp->lock);
2707                                         break;
2708                                 }
2709                                 spin_unlock(&tmp->lock);
2710                                 tmp = tmp->from_parent;
2711                         }
2712                 }
2713                 binder_inner_proc_unlock(proc);
2714         }
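        /*
         * Target selection is complete: target_proc is pinned by a
         * tmp_ref, target_node (if any) holds both a strong ref and a
         * tmp_ref, and target_thread (if one was found) holds a
         * tmp_ref.  These temporary references keep the destination
         * alive until the transaction is queued or unwound below.
         */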
2715         if (target_thread)
2716                 e->to_thread = target_thread->pid;
2717         e->to_proc = target_proc->pid;
2718
2719         /* TODO: reuse incoming transaction for reply */
2720         t = kzalloc(sizeof(*t), GFP_KERNEL);
2721         if (t == NULL) {
2722                 return_error = BR_FAILED_REPLY;
2723                 return_error_param = -ENOMEM;
2724                 return_error_line = __LINE__;
2725                 goto err_alloc_t_failed;
2726         }
2727         INIT_LIST_HEAD(&t->fd_fixups);
2728         binder_stats_created(BINDER_STAT_TRANSACTION);
2729         spin_lock_init(&t->lock);
2730
2731         tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
2732         if (tcomplete == NULL) {
2733                 return_error = BR_FAILED_REPLY;
2734                 return_error_param = -ENOMEM;
2735                 return_error_line = __LINE__;
2736                 goto err_alloc_tcomplete_failed;
2737         }
2738         binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
2739
2740         t->debug_id = t_debug_id;
2741
2742         if (reply)
2743                 binder_debug(BINDER_DEBUG_TRANSACTION,
2744                              "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n",
2745                              proc->pid, thread->pid, t->debug_id,
2746                              target_proc->pid, target_thread->pid,
2747                              (u64)tr->data.ptr.buffer,
2748                              (u64)tr->data.ptr.offsets,
2749                              (u64)tr->data_size, (u64)tr->offsets_size,
2750                              (u64)extra_buffers_size);
2751         else
2752                 binder_debug(BINDER_DEBUG_TRANSACTION,
2753                              "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n",
2754                              proc->pid, thread->pid, t->debug_id,
2755                              target_proc->pid, target_node->debug_id,
2756                              (u64)tr->data.ptr.buffer,
2757                              (u64)tr->data.ptr.offsets,
2758                              (u64)tr->data_size, (u64)tr->offsets_size,
2759                              (u64)extra_buffers_size);
2760
2761         if (!reply && !(tr->flags & TF_ONE_WAY))
2762                 t->from = thread;
2763         else
2764                 t->from = NULL;
2765         t->sender_euid = task_euid(proc->tsk);
2766         t->to_proc = target_proc;
2767         t->to_thread = target_thread;
2768         t->code = tr->code;
2769         t->flags = tr->flags;
2770         t->priority = task_nice(current);
2771
2772         if (target_node && target_node->txn_security_ctx) {
2773                 u32 secid;
2774                 size_t added_size;
2775
2776                 security_cred_getsecid(proc->cred, &secid);
2777                 ret = security_secid_to_secctx(secid, &secctx, &secctx_sz);
2778                 if (ret) {
2779                         return_error = BR_FAILED_REPLY;
2780                         return_error_param = ret;
2781                         return_error_line = __LINE__;
2782                         goto err_get_secctx_failed;
2783                 }
2784                 added_size = ALIGN(secctx_sz, sizeof(u64));
2785                 extra_buffers_size += added_size;
2786                 if (extra_buffers_size < added_size) {
2787                         /* integer overflow of extra_buffers_size */
2788                         return_error = BR_FAILED_REPLY;
2789                         return_error_param = -EINVAL;
2790                         return_error_line = __LINE__;
2791                         goto err_bad_extra_size;
2792                 }
2793         }
2794
2795         trace_binder_transaction(reply, t, target_node);
2796
2797         t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size,
2798                 tr->offsets_size, extra_buffers_size,
2799                 !reply && (t->flags & TF_ONE_WAY), current->tgid);
2800         if (IS_ERR(t->buffer)) {
2801                 /*
2802                  * -ESRCH indicates VMA cleared. The target is dying.
2803                  */
2804                 return_error_param = PTR_ERR(t->buffer);
2805                 return_error = return_error_param == -ESRCH ?
2806                         BR_DEAD_REPLY : BR_FAILED_REPLY;
2807                 return_error_line = __LINE__;
2808                 t->buffer = NULL;
2809                 goto err_binder_alloc_buf_failed;
2810         }
2811         if (secctx) {
2812                 int err;
2813                 size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) +
2814                                     ALIGN(tr->offsets_size, sizeof(void *)) +
2815                                     ALIGN(extra_buffers_size, sizeof(void *)) -
2816                                     ALIGN(secctx_sz, sizeof(u64));
2817
2818                 t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset;
2819                 err = binder_alloc_copy_to_buffer(&target_proc->alloc,
2820                                                   t->buffer, buf_offset,
2821                                                   secctx, secctx_sz);
2822                 if (err) {
2823                         t->security_ctx = 0;
2824                         WARN_ON(1);
2825                 }
2826                 security_release_secctx(secctx, secctx_sz);
2827                 secctx = NULL;
2828         }
2829         t->buffer->debug_id = t->debug_id;
2830         t->buffer->transaction = t;
2831         t->buffer->target_node = target_node;
2832         t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF);
2833         trace_binder_transaction_alloc_buf(t->buffer);
2834
2835         if (binder_alloc_copy_user_to_buffer(
2836                                 &target_proc->alloc,
2837                                 t->buffer,
2838                                 ALIGN(tr->data_size, sizeof(void *)),
2839                                 (const void __user *)
2840                                         (uintptr_t)tr->data.ptr.offsets,
2841                                 tr->offsets_size)) {
2842                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
2843                                 proc->pid, thread->pid);
2844                 return_error = BR_FAILED_REPLY;
2845                 return_error_param = -EFAULT;
2846                 return_error_line = __LINE__;
2847                 goto err_copy_data_failed;
2848         }
2849         if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) {
2850                 binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n",
2851                                 proc->pid, thread->pid, (u64)tr->offsets_size);
2852                 return_error = BR_FAILED_REPLY;
2853                 return_error_param = -EINVAL;
2854                 return_error_line = __LINE__;
2855                 goto err_bad_offset;
2856         }
2857         if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) {
2858                 binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n",
2859                                   proc->pid, thread->pid,
2860                                   (u64)extra_buffers_size);
2861                 return_error = BR_FAILED_REPLY;
2862                 return_error_param = -EINVAL;
2863                 return_error_line = __LINE__;
2864                 goto err_bad_offset;
2865         }
2866         off_start_offset = ALIGN(tr->data_size, sizeof(void *));
2867         buffer_offset = off_start_offset;
2868         off_end_offset = off_start_offset + tr->offsets_size;
2869         sg_buf_offset = ALIGN(off_end_offset, sizeof(void *));
2870         sg_buf_end_offset = sg_buf_offset + extra_buffers_size -
2871                 ALIGN(secctx_sz, sizeof(u64));
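        /*
         * Layout of the target buffer at this point:
         * [payload | offsets array | sg buffer copies | secctx],
         * where the security context (if any) occupies the tail of the
         * extra buffers area, hence the ALIGN(secctx_sz, ...)
         * subtraction above.
         */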
2872         off_min = 0;
2873         for (buffer_offset = off_start_offset; buffer_offset < off_end_offset;
2874              buffer_offset += sizeof(binder_size_t)) {
2875                 struct binder_object_header *hdr;
2876                 size_t object_size;
2877                 struct binder_object object;
2878                 binder_size_t object_offset;
2879                 binder_size_t copy_size;
2880
2881                 if (binder_alloc_copy_from_buffer(&target_proc->alloc,
2882                                                   &object_offset,
2883                                                   t->buffer,
2884                                                   buffer_offset,
2885                                                   sizeof(object_offset))) {
2886                         return_error = BR_FAILED_REPLY;
2887                         return_error_param = -EINVAL;
2888                         return_error_line = __LINE__;
2889                         goto err_bad_offset;
2890                 }
2891
2892                 /*
2893                  * Copy the source user buffer up to the next object
2894                  * that will be processed.
2895                  */
2896                 copy_size = object_offset - user_offset;
2897                 if (copy_size && (user_offset > object_offset ||
2898                                 binder_alloc_copy_user_to_buffer(
2899                                         &target_proc->alloc,
2900                                         t->buffer, user_offset,
2901                                         user_buffer + user_offset,
2902                                         copy_size))) {
2903                         binder_user_error("%d:%d got transaction with invalid data ptr\n",
2904                                         proc->pid, thread->pid);
2905                         return_error = BR_FAILED_REPLY;
2906                         return_error_param = -EFAULT;
2907                         return_error_line = __LINE__;
2908                         goto err_copy_data_failed;
2909                 }
2910                 object_size = binder_get_object(target_proc, user_buffer,
2911                                 t->buffer, object_offset, &object);
2912                 if (object_size == 0 || object_offset < off_min) {
2913                         binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n",
2914                                           proc->pid, thread->pid,
2915                                           (u64)object_offset,
2916                                           (u64)off_min,
2917                                           (u64)t->buffer->data_size);
2918                         return_error = BR_FAILED_REPLY;
2919                         return_error_param = -EINVAL;
2920                         return_error_line = __LINE__;
2921                         goto err_bad_offset;
2922                 }
2923                 /*
2924                  * Set offset to the next buffer fragment to be
2925                  * copied
2926                  */
2927                 user_offset = object_offset + object_size;
2928
2929                 hdr = &object.hdr;
2930                 off_min = object_offset + object_size;
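                /*
                 * Because off_min advances past each validated object,
                 * the offsets array must be sorted in increasing order
                 * and objects must not overlap; anything else trips the
                 * off_min check above.
                 */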
2931                 switch (hdr->type) {
2932                 case BINDER_TYPE_BINDER:
2933                 case BINDER_TYPE_WEAK_BINDER: {
2934                         struct flat_binder_object *fp;
2935
2936                         fp = to_flat_binder_object(hdr);
2937                         ret = binder_translate_binder(fp, t, thread);
2938
2939                         if (ret < 0 ||
2940                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2941                                                         t->buffer,
2942                                                         object_offset,
2943                                                         fp, sizeof(*fp))) {
2944                                 return_error = BR_FAILED_REPLY;
2945                                 return_error_param = ret;
2946                                 return_error_line = __LINE__;
2947                                 goto err_translate_failed;
2948                         }
2949                 } break;
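                /*
                 * Each translated object is rewritten in place and then
                 * copied back into the target buffer at object_offset:
                 * e.g. a sender-local node reference becomes a handle
                 * that is valid in the target process.
                 */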
2950                 case BINDER_TYPE_HANDLE:
2951                 case BINDER_TYPE_WEAK_HANDLE: {
2952                         struct flat_binder_object *fp;
2953
2954                         fp = to_flat_binder_object(hdr);
2955                         ret = binder_translate_handle(fp, t, thread);
2956                         if (ret < 0 ||
2957                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2958                                                         t->buffer,
2959                                                         object_offset,
2960                                                         fp, sizeof(*fp))) {
2961                                 return_error = BR_FAILED_REPLY;
2962                                 return_error_param = ret;
2963                                 return_error_line = __LINE__;
2964                                 goto err_translate_failed;
2965                         }
2966                 } break;
2967
2968                 case BINDER_TYPE_FD: {
2969                         struct binder_fd_object *fp = to_binder_fd_object(hdr);
2970                         binder_size_t fd_offset = object_offset +
2971                                 (uintptr_t)&fp->fd - (uintptr_t)fp;
2972                         int ret = binder_translate_fd(fp->fd, fd_offset, t,
2973                                                       thread, in_reply_to);
2974
2975                         fp->pad_binder = 0;
2976                         if (ret < 0 ||
2977                             binder_alloc_copy_to_buffer(&target_proc->alloc,
2978                                                         t->buffer,
2979                                                         object_offset,
2980                                                         fp, sizeof(*fp))) {
2981                                 return_error = BR_FAILED_REPLY;
2982                                 return_error_param = ret;
2983                                 return_error_line = __LINE__;
2984                                 goto err_translate_failed;
2985                         }
2986                 } break;
2987                 case BINDER_TYPE_FDA: {
2988                         struct binder_object ptr_object;
2989                         binder_size_t parent_offset;
2990                         struct binder_fd_array_object *fda =
2991                                 to_binder_fd_array_object(hdr);
2992                         size_t num_valid = (buffer_offset - off_start_offset) /
2993                                                 sizeof(binder_size_t);
2994                         struct binder_buffer_object *parent =
2995                                 binder_validate_ptr(target_proc, t->buffer,
2996                                                     &ptr_object, fda->parent,
2997                                                     off_start_offset,
2998                                                     &parent_offset,
2999                                                     num_valid);
3000                         if (!parent) {
3001                                 binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
3002                                                   proc->pid, thread->pid);
3003                                 return_error = BR_FAILED_REPLY;
3004                                 return_error_param = -EINVAL;
3005                                 return_error_line = __LINE__;
3006                                 goto err_bad_parent;
3007                         }
3008                         if (!binder_validate_fixup(target_proc, t->buffer,
3009                                                    off_start_offset,
3010                                                    parent_offset,
3011                                                    fda->parent_offset,
3012                                                    last_fixup_obj_off,
3013                                                    last_fixup_min_off)) {
3014                                 binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
3015                                                   proc->pid, thread->pid);
3016                                 return_error = BR_FAILED_REPLY;
3017                                 return_error_param = -EINVAL;
3018                                 return_error_line = __LINE__;
3019                                 goto err_bad_parent;
3020                         }
3021                         ret = binder_translate_fd_array(fda, parent, t, thread,
3022                                                         in_reply_to);
3023                         if (!ret)
3024                                 ret = binder_alloc_copy_to_buffer(&target_proc->alloc,
3025                                                                   t->buffer,
3026                                                                   object_offset,
3027                                                                   fda, sizeof(*fda));
3028                         if (ret) {
3029                                 return_error = BR_FAILED_REPLY;
3030                                 return_error_param = ret > 0 ? -EINVAL : ret;
3031                                 return_error_line = __LINE__;
3032                                 goto err_translate_failed;
3033                         }
3034                         last_fixup_obj_off = parent_offset;
3035                         last_fixup_min_off =
3036                                 fda->parent_offset + sizeof(u32) * fda->num_fds;
3037                 } break;
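                /*
                 * last_fixup_obj_off/last_fixup_min_off remember where
                 * the previous fixup landed; binder_validate_fixup()
                 * uses them to require that fixups within a parent
                 * buffer are applied in increasing, non-overlapping
                 * order.
                 */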
3038                 case BINDER_TYPE_PTR: {
3039                         struct binder_buffer_object *bp =
3040                                 to_binder_buffer_object(hdr);
3041                         size_t buf_left = sg_buf_end_offset - sg_buf_offset;
3042                         size_t num_valid;
3043
3044                         if (bp->length > buf_left) {
3045                                 binder_user_error("%d:%d got transaction with too large buffer\n",
3046                                                   proc->pid, thread->pid);
3047                                 return_error = BR_FAILED_REPLY;
3048                                 return_error_param = -EINVAL;
3049                                 return_error_line = __LINE__;
3050                                 goto err_bad_offset;
3051                         }
3052                         if (binder_alloc_copy_user_to_buffer(
3053                                                 &target_proc->alloc,
3054                                                 t->buffer,
3055                                                 sg_buf_offset,
3056                                                 (const void __user *)
3057                                                         (uintptr_t)bp->buffer,
3058                                                 bp->length)) {
3059                                 binder_user_error("%d:%d got transaction with invalid offsets ptr\n",
3060                                                   proc->pid, thread->pid);
3061                                 return_error_param = -EFAULT;
3062                                 return_error = BR_FAILED_REPLY;
3063                                 return_error_line = __LINE__;
3064                                 goto err_copy_data_failed;
3065                         }
3066                         /* Fixup buffer pointer to target proc address space */
3067                         bp->buffer = (uintptr_t)
3068                                 t->buffer->user_data + sg_buf_offset;
3069                         sg_buf_offset += ALIGN(bp->length, sizeof(u64));
3070
3071                         num_valid = (buffer_offset - off_start_offset) /
3072                                         sizeof(binder_size_t);
3073                         ret = binder_fixup_parent(t, thread, bp,
3074                                                   off_start_offset,
3075                                                   num_valid,
3076                                                   last_fixup_obj_off,
3077                                                   last_fixup_min_off);
3078                         if (ret < 0 ||
3079                             binder_alloc_copy_to_buffer(&target_proc->alloc,
3080                                                         t->buffer,
3081                                                         object_offset,
3082                                                         bp, sizeof(*bp))) {
3083                                 return_error = BR_FAILED_REPLY;
3084                                 return_error_param = ret;
3085                                 return_error_line = __LINE__;
3086                                 goto err_translate_failed;
3087                         }
3088                         last_fixup_obj_off = object_offset;
3089                         last_fixup_min_off = 0;
3090                 } break;
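                /*
                 * After the fixup above, bp->buffer refers to the copy
                 * in the target's scatter-gather area, so the receiver
                 * never sees the sender's original user-space pointer.
                 */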
3091                 default:
3092                         binder_user_error("%d:%d got transaction with invalid object type, %x\n",
3093                                 proc->pid, thread->pid, hdr->type);
3094                         return_error = BR_FAILED_REPLY;
3095                         return_error_param = -EINVAL;
3096                         return_error_line = __LINE__;
3097                         goto err_bad_object_type;
3098                 }
3099         }
3100         /* Done processing objects, copy the rest of the buffer */
3101         if (binder_alloc_copy_user_to_buffer(
3102                                 &target_proc->alloc,
3103                                 t->buffer, user_offset,
3104                                 user_buffer + user_offset,
3105                                 tr->data_size - user_offset)) {
3106                 binder_user_error("%d:%d got transaction with invalid data ptr\n",
3107                                 proc->pid, thread->pid);
3108                 return_error = BR_FAILED_REPLY;
3109                 return_error_param = -EFAULT;
3110                 return_error_line = __LINE__;
3111                 goto err_copy_data_failed;
3112         }
3113         if (t->buffer->oneway_spam_suspect)
3114                 tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
3115         else
3116                 tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
3117         t->work.type = BINDER_WORK_TRANSACTION;
3118
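        /*
         * Three delivery paths follow: a reply is queued directly to
         * the thread waiting in the target; a synchronous call is
         * pushed onto this thread's transaction stack with its
         * TRANSACTION_COMPLETE deferred; a one-way call is queued to
         * the target process (or held on the node's async_todo if a
         * one-way transaction is already in flight there).
         */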
3119         if (reply) {
3120                 binder_enqueue_thread_work(thread, tcomplete);
3121                 binder_inner_proc_lock(target_proc);
3122                 if (target_thread->is_dead) {
3123                         return_error = BR_DEAD_REPLY;
3124                         binder_inner_proc_unlock(target_proc);
3125                         goto err_dead_proc_or_thread;
3126                 }
3127                 BUG_ON(t->buffer->async_transaction != 0);
3128                 binder_pop_transaction_ilocked(target_thread, in_reply_to);
3129                 binder_enqueue_thread_work_ilocked(target_thread, &t->work);
3130                 target_proc->outstanding_txns++;
3131                 binder_inner_proc_unlock(target_proc);
3132                 wake_up_interruptible_sync(&target_thread->wait);
3133                 binder_free_transaction(in_reply_to);
3134         } else if (!(t->flags & TF_ONE_WAY)) {
3135                 BUG_ON(t->buffer->async_transaction != 0);
3136                 binder_inner_proc_lock(proc);
3137                 /*
3138                  * Defer the TRANSACTION_COMPLETE, so we don't return to
3139                  * userspace immediately; this allows the target process to
3140                  * immediately start processing this transaction, reducing
3141                  * latency. We will then return the TRANSACTION_COMPLETE when
3142                  * the target replies (or there is an error).
3143                  */
3144                 binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
3145                 t->need_reply = 1;
3146                 t->from_parent = thread->transaction_stack;
3147                 thread->transaction_stack = t;
3148                 binder_inner_proc_unlock(proc);
3149                 return_error = binder_proc_transaction(t,
3150                                 target_proc, target_thread);
3151                 if (return_error) {
3152                         binder_inner_proc_lock(proc);
3153                         binder_pop_transaction_ilocked(thread, t);
3154                         binder_inner_proc_unlock(proc);
3155                         goto err_dead_proc_or_thread;
3156                 }
3157         } else {
3158                 BUG_ON(target_node == NULL);
3159                 BUG_ON(t->buffer->async_transaction != 1);
3160                 binder_enqueue_thread_work(thread, tcomplete);
3161                 return_error = binder_proc_transaction(t, target_proc, NULL);
3162                 if (return_error)
3163                         goto err_dead_proc_or_thread;
3164         }
3165         if (target_thread)
3166                 binder_thread_dec_tmpref(target_thread);
3167         binder_proc_dec_tmpref(target_proc);
3168         if (target_node)
3169                 binder_dec_node_tmpref(target_node);
3170         /*
3171          * write barrier to synchronize with initialization
3172          * of log entry
3173          */
3174         smp_wmb();
3175         WRITE_ONCE(e->debug_id_done, t_debug_id);
3176         return;
3177
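/*
 * Error unwinding: each label below undoes the setup performed before
 * the corresponding failure point, so a goto falls through exactly the
 * cleanup that is needed and no more.
 */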
3178 err_dead_proc_or_thread:
3179         return_error_line = __LINE__;
3180         binder_dequeue_work(proc, tcomplete);
3181 err_translate_failed:
3182 err_bad_object_type:
3183 err_bad_offset:
3184 err_bad_parent:
3185 err_copy_data_failed:
3186         binder_free_txn_fixups(t);
3187         trace_binder_transaction_failed_buffer_release(t->buffer);
3188         binder_transaction_buffer_release(target_proc, NULL, t->buffer,
3189                                           buffer_offset, true);
3190         if (target_node)
3191                 binder_dec_node_tmpref(target_node);
3192         target_node = NULL;
3193         t->buffer->transaction = NULL;
3194         binder_alloc_free_buf(&target_proc->alloc, t->buffer);
3195 err_binder_alloc_buf_failed:
3196 err_bad_extra_size:
3197         if (secctx)
3198                 security_release_secctx(secctx, secctx_sz);
3199 err_get_secctx_failed:
3200         kfree(tcomplete);
3201         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
3202 err_alloc_tcomplete_failed:
3203         if (trace_binder_txn_latency_free_enabled())
3204                 binder_txn_latency_free(t);
3205         kfree(t);
3206         binder_stats_deleted(BINDER_STAT_TRANSACTION);
3207 err_alloc_t_failed:
3208 err_bad_todo_list:
3209 err_bad_call_stack:
3210 err_empty_call_stack:
3211 err_dead_binder:
3212 err_invalid_target_handle:
3213         if (target_thread)
3214                 binder_thread_dec_tmpref(target_thread);
3215         if (target_proc)
3216                 binder_proc_dec_tmpref(target_proc);
3217         if (target_node) {
3218                 binder_dec_node(target_node, 1, 0);
3219                 binder_dec_node_tmpref(target_node);
3220         }
3221
3222         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
3223                      "%d:%d transaction failed %d/%d, size %lld-%lld line %d\n",
3224                      proc->pid, thread->pid, return_error, return_error_param,
3225                      (u64)tr->data_size, (u64)tr->offsets_size,
3226                      return_error_line);
3227
3228         {
3229                 struct binder_transaction_log_entry *fe;
3230
3231                 e->return_error = return_error;
3232                 e->return_error_param = return_error_param;
3233                 e->return_error_line = return_error_line;
3234                 fe = binder_transaction_log_add(&binder_transaction_log_failed);
3235                 *fe = *e;
3236                 /*
3237                  * write barrier to synchronize with initialization
3238                  * of log entry
3239                  */
3240                 smp_wmb();
3241                 WRITE_ONCE(e->debug_id_done, t_debug_id);
3242                 WRITE_ONCE(fe->debug_id_done, t_debug_id);
3243         }
3244
3245         BUG_ON(thread->return_error.cmd != BR_OK);
3246         if (in_reply_to) {
3247                 thread->return_error.cmd = BR_TRANSACTION_COMPLETE;
3248                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3249                 binder_send_failed_reply(in_reply_to, return_error);
3250         } else {
3251                 thread->return_error.cmd = return_error;
3252                 binder_enqueue_thread_work(thread, &thread->return_error.work);
3253         }
3254 }
3255
3256 /**
3257  * binder_free_buf() - free the specified buffer
3258  * @proc:       binder proc that owns buffer
 * @thread:     binder thread performing the buffer release
3259  * @buffer:     buffer to be freed
3260  * @is_failure: true if the buffer is being freed on the failure path
3261  *
3262  * If the buffer is for an async transaction, enqueue the next async
3263  * transaction from the node.
3264  *
3265  * Clean up the buffer and free it.
3266  */
3267 static void
3268 binder_free_buf(struct binder_proc *proc,
3269                 struct binder_thread *thread,
3270                 struct binder_buffer *buffer, bool is_failure)
3271 {
3272         binder_inner_proc_lock(proc);
3273         if (buffer->transaction) {
3274                 buffer->transaction->buffer = NULL;
3275                 buffer->transaction = NULL;
3276         }
3277         binder_inner_proc_unlock(proc);
3278         if (buffer->async_transaction && buffer->target_node) {
3279                 struct binder_node *buf_node;
3280                 struct binder_work *w;
3281
3282                 buf_node = buffer->target_node;
3283                 binder_node_inner_lock(buf_node);
3284                 BUG_ON(!buf_node->has_async_transaction);
3285                 BUG_ON(buf_node->proc != proc);
3286                 w = binder_dequeue_work_head_ilocked(
3287                                 &buf_node->async_todo);
3288                 if (!w) {
3289                         buf_node->has_async_transaction = false;
3290                 } else {
3291                         binder_enqueue_work_ilocked(
3292                                         w, &proc->todo);
3293                         binder_wakeup_proc_ilocked(proc);
3294                 }
3295                 binder_node_inner_unlock(buf_node);
3296         }
3297         trace_binder_transaction_buffer_release(buffer);
3298         binder_transaction_buffer_release(proc, thread, buffer, 0, is_failure);
3299         binder_alloc_free_buf(&proc->alloc, buffer);
3300 }
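/*
 * Note that one-way (async) transactions to a node are serialized: the
 * next transaction queued on node->async_todo is only moved to
 * proc->todo here, when userspace frees the previous transaction's
 * buffer with BC_FREE_BUFFER.
 */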
3301
3302 static int binder_thread_write(struct binder_proc *proc,
3303                         struct binder_thread *thread,
3304                         binder_uintptr_t binder_buffer, size_t size,
3305                         binder_size_t *consumed)
3306 {
3307         uint32_t cmd;
3308         struct binder_context *context = proc->context;
3309         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3310         void __user *ptr = buffer + *consumed;
3311         void __user *end = buffer + size;
3312
3313         while (ptr < end && thread->return_error.cmd == BR_OK) {
3314                 int ret;
3315
3316                 if (get_user(cmd, (uint32_t __user *)ptr))
3317                         return -EFAULT;
3318                 ptr += sizeof(uint32_t);
3319                 trace_binder_command(cmd);
3320                 if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) {
3321                         atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]);
3322                         atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]);
3323                         atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]);
3324                 }
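                /*
                 * BC_* commands are built with the _IO* macros, so
                 * _IOC_NR(cmd) yields a small index used to bump the
                 * global, per-process and per-thread command counters.
                 */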
3325                 switch (cmd) {
3326                 case BC_INCREFS:
3327                 case BC_ACQUIRE:
3328                 case BC_RELEASE:
3329                 case BC_DECREFS: {
3330                         uint32_t target;
3331                         const char *debug_string;
3332                         bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE;
3333                         bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE;
3334                         struct binder_ref_data rdata;
3335
3336                         if (get_user(target, (uint32_t __user *)ptr))
3337                                 return -EFAULT;
3338
3339                         ptr += sizeof(uint32_t);
3340                         ret = -1;
3341                         if (increment && !target) {
3342                                 struct binder_node *ctx_mgr_node;
3343
3344                                 mutex_lock(&context->context_mgr_node_lock);
3345                                 ctx_mgr_node = context->binder_context_mgr_node;
3346                                 if (ctx_mgr_node) {
3347                                         if (ctx_mgr_node->proc == proc) {
3348                                                 binder_user_error("%d:%d context manager tried to acquire desc 0\n",
3349                                                                   proc->pid, thread->pid);
3350                                                 mutex_unlock(&context->context_mgr_node_lock);
3351                                                 return -EINVAL;
3352                                         }
3353                                         ret = binder_inc_ref_for_node(
3354                                                         proc, ctx_mgr_node,
3355                                                         strong, NULL, &rdata);
3356                                 }
3357                                 mutex_unlock(&context->context_mgr_node_lock);
3358                         }
3359                         if (ret)
3360                                 ret = binder_update_ref_for_handle(
3361                                                 proc, target, increment, strong,
3362                                                 &rdata);
3363                         if (!ret && rdata.desc != target) {
3364                                 binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n",
3365                                         proc->pid, thread->pid,
3366                                         target, rdata.desc);
3367                         }
3368                         switch (cmd) {
3369                         case BC_INCREFS:
3370                                 debug_string = "IncRefs";
3371                                 break;
3372                         case BC_ACQUIRE:
3373                                 debug_string = "Acquire";
3374                                 break;
3375                         case BC_RELEASE:
3376                                 debug_string = "Release";
3377                                 break;
3378                         case BC_DECREFS:
3379                         default:
3380                                 debug_string = "DecRefs";
3381                                 break;
3382                         }
3383                         if (ret) {
3384                                 binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n",
3385                                         proc->pid, thread->pid, debug_string,
3386                                         strong, target, ret);
3387                                 break;
3388                         }
3389                         binder_debug(BINDER_DEBUG_USER_REFS,
3390                                      "%d:%d %s ref %d desc %d s %d w %d\n",
3391                                      proc->pid, thread->pid, debug_string,
3392                                      rdata.debug_id, rdata.desc, rdata.strong,
3393                                      rdata.weak);
3394                         break;
3395                 }
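                /*
                 * Handle 0 always names the context manager, and the
                 * first strong/weak increment on it can arrive before
                 * any ref exists; binder_inc_ref_for_node() creates the
                 * ref in that case, which a plain handle lookup cannot.
                 */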
3396                 case BC_INCREFS_DONE:
3397                 case BC_ACQUIRE_DONE: {
3398                         binder_uintptr_t node_ptr;
3399                         binder_uintptr_t cookie;
3400                         struct binder_node *node;
3401                         bool free_node;
3402
3403                         if (get_user(node_ptr, (binder_uintptr_t __user *)ptr))
3404                                 return -EFAULT;
3405                         ptr += sizeof(binder_uintptr_t);
3406                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3407                                 return -EFAULT;
3408                         ptr += sizeof(binder_uintptr_t);
3409                         node = binder_get_node(proc, node_ptr);
3410                         if (node == NULL) {
3411                                 binder_user_error("%d:%d %s u%016llx no match\n",
3412                                         proc->pid, thread->pid,
3413                                         cmd == BC_INCREFS_DONE ?
3414                                         "BC_INCREFS_DONE" :
3415                                         "BC_ACQUIRE_DONE",
3416                                         (u64)node_ptr);
3417                                 break;
3418                         }
3419                         if (cookie != node->cookie) {
3420                                 binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n",
3421                                         proc->pid, thread->pid,
3422                                         cmd == BC_INCREFS_DONE ?
3423                                         "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3424                                         (u64)node_ptr, node->debug_id,
3425                                         (u64)cookie, (u64)node->cookie);
3426                                 binder_put_node(node);
3427                                 break;
3428                         }
3429                         binder_node_inner_lock(node);
3430                         if (cmd == BC_ACQUIRE_DONE) {
3431                                 if (node->pending_strong_ref == 0) {
3432                                         binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n",
3433                                                 proc->pid, thread->pid,
3434                                                 node->debug_id);
3435                                         binder_node_inner_unlock(node);
3436                                         binder_put_node(node);
3437                                         break;
3438                                 }
3439                                 node->pending_strong_ref = 0;
3440                         } else {
3441                                 if (node->pending_weak_ref == 0) {
3442                                         binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n",
3443                                                 proc->pid, thread->pid,
3444                                                 node->debug_id);
3445                                         binder_node_inner_unlock(node);
3446                                         binder_put_node(node);
3447                                         break;
3448                                 }
3449                                 node->pending_weak_ref = 0;
3450                         }
3451                         free_node = binder_dec_node_nilocked(node,
3452                                         cmd == BC_ACQUIRE_DONE, 0);
3453                         WARN_ON(free_node);
3454                         binder_debug(BINDER_DEBUG_USER_REFS,
3455                                      "%d:%d %s node %d ls %d lw %d tr %d\n",
3456                                      proc->pid, thread->pid,
3457                                      cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
3458                                      node->debug_id, node->local_strong_refs,
3459                                      node->local_weak_refs, node->tmp_refs);
3460                         binder_node_inner_unlock(node);
3461                         binder_put_node(node);
3462                         break;
3463                 }
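                /*
                 * BC_INCREFS_DONE/BC_ACQUIRE_DONE acknowledge an
                 * earlier BR_INCREFS/BR_ACQUIRE; the pending_weak_ref/
                 * pending_strong_ref flags guarantee that each
                 * acknowledgement matches exactly one request.
                 */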
3464                 case BC_ATTEMPT_ACQUIRE:
3465                         pr_err("BC_ATTEMPT_ACQUIRE not supported\n");
3466                         return -EINVAL;
3467                 case BC_ACQUIRE_RESULT:
3468                         pr_err("BC_ACQUIRE_RESULT not supported\n");
3469                         return -EINVAL;
3470
3471                 case BC_FREE_BUFFER: {
3472                         binder_uintptr_t data_ptr;
3473                         struct binder_buffer *buffer;
3474
3475                         if (get_user(data_ptr, (binder_uintptr_t __user *)ptr))
3476                                 return -EFAULT;
3477                         ptr += sizeof(binder_uintptr_t);
3478
3479                         buffer = binder_alloc_prepare_to_free(&proc->alloc,
3480                                                               data_ptr);
3481                         if (IS_ERR_OR_NULL(buffer)) {
3482                                 if (PTR_ERR(buffer) == -EPERM) {
3483                                         binder_user_error(
3484                                                 "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n",
3485                                                 proc->pid, thread->pid,
3486                                                 (u64)data_ptr);
3487                                 } else {
3488                                         binder_user_error(
3489                                                 "%d:%d BC_FREE_BUFFER u%016llx no match\n",
3490                                                 proc->pid, thread->pid,
3491                                                 (u64)data_ptr);
3492                                 }
3493                                 break;
3494                         }
3495                         binder_debug(BINDER_DEBUG_FREE_BUFFER,
3496                                      "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n",
3497                                      proc->pid, thread->pid, (u64)data_ptr,
3498                                      buffer->debug_id,
3499                                      buffer->transaction ? "active" : "finished");
3500                         binder_free_buf(proc, thread, buffer, false);
3501                         break;
3502                 }
3503
3504                 case BC_TRANSACTION_SG:
3505                 case BC_REPLY_SG: {
3506                         struct binder_transaction_data_sg tr;
3507
3508                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3509                                 return -EFAULT;
3510                         ptr += sizeof(tr);
3511                         binder_transaction(proc, thread, &tr.transaction_data,
3512                                            cmd == BC_REPLY_SG, tr.buffers_size);
3513                         break;
3514                 }
3515                 case BC_TRANSACTION:
3516                 case BC_REPLY: {
3517                         struct binder_transaction_data tr;
3518
3519                         if (copy_from_user(&tr, ptr, sizeof(tr)))
3520                                 return -EFAULT;
3521                         ptr += sizeof(tr);
3522                         binder_transaction(proc, thread, &tr,
3523                                            cmd == BC_REPLY, 0);
3524                         break;
3525                 }
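                /*
                 * Illustrative note: userspace reaches these cases via
                 * the BINDER_WRITE_READ ioctl by placing a
                 * BC_TRANSACTION (or BC_REPLY) command word, immediately
                 * followed by a struct binder_transaction_data, in the
                 * write buffer of a struct binder_write_read.
                 */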
3526
3527                 case BC_REGISTER_LOOPER:
3528                         binder_debug(BINDER_DEBUG_THREADS,
3529                                      "%d:%d BC_REGISTER_LOOPER\n",
3530                                      proc->pid, thread->pid);
3531                         binder_inner_proc_lock(proc);
3532                         if (thread->looper & BINDER_LOOPER_STATE_ENTERED) {
3533                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3534                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n",
3535                                         proc->pid, thread->pid);
3536                         } else if (proc->requested_threads == 0) {
3537                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3538                                 binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n",
3539                                         proc->pid, thread->pid);
3540                         } else {
3541                                 proc->requested_threads--;
3542                                 proc->requested_threads_started++;
3543                         }
3544                         thread->looper |= BINDER_LOOPER_STATE_REGISTERED;
3545                         binder_inner_proc_unlock(proc);
3546                         break;
3547                 case BC_ENTER_LOOPER:
3548                         binder_debug(BINDER_DEBUG_THREADS,
3549                                      "%d:%d BC_ENTER_LOOPER\n",
3550                                      proc->pid, thread->pid);
3551                         if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) {
3552                                 thread->looper |= BINDER_LOOPER_STATE_INVALID;
3553                                 binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n",
3554                                         proc->pid, thread->pid);
3555                         }
3556                         thread->looper |= BINDER_LOOPER_STATE_ENTERED;
3557                         break;
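                /*
                 * BC_ENTER_LOOPER marks a looper thread the application
                 * started on its own, while BC_REGISTER_LOOPER marks
                 * one spawned in response to BR_SPAWN_LOOPER; a thread
                 * may be exactly one of the two, hence the INVALID
                 * state set on misuse.
                 */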
3558                 case BC_EXIT_LOOPER:
3559                         binder_debug(BINDER_DEBUG_THREADS,
3560                                      "%d:%d BC_EXIT_LOOPER\n",
3561                                      proc->pid, thread->pid);
3562                         thread->looper |= BINDER_LOOPER_STATE_EXITED;
3563                         break;
3564
3565                 case BC_REQUEST_DEATH_NOTIFICATION:
3566                 case BC_CLEAR_DEATH_NOTIFICATION: {
3567                         uint32_t target;
3568                         binder_uintptr_t cookie;
3569                         struct binder_ref *ref;
3570                         struct binder_ref_death *death = NULL;
3571
3572                         if (get_user(target, (uint32_t __user *)ptr))
3573                                 return -EFAULT;
3574                         ptr += sizeof(uint32_t);
3575                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3576                                 return -EFAULT;
3577                         ptr += sizeof(binder_uintptr_t);
3578                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3579                                 /*
3580                                  * Allocate memory for death notification
3581                                  * before taking lock
3582                                  */
3583                                 death = kzalloc(sizeof(*death), GFP_KERNEL);
3584                                 if (death == NULL) {
3585                                         WARN_ON(thread->return_error.cmd !=
3586                                                 BR_OK);
3587                                         thread->return_error.cmd = BR_ERROR;
3588                                         binder_enqueue_thread_work(
3589                                                 thread,
3590                                                 &thread->return_error.work);
3591                                         binder_debug(
3592                                                 BINDER_DEBUG_FAILED_TRANSACTION,
3593                                                 "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n",
3594                                                 proc->pid, thread->pid);
3595                                         break;
3596                                 }
3597                         }
3598                         binder_proc_lock(proc);
3599                         ref = binder_get_ref_olocked(proc, target, false);
3600                         if (ref == NULL) {
3601                                 binder_user_error("%d:%d %s invalid ref %d\n",
3602                                         proc->pid, thread->pid,
3603                                         cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3604                                         "BC_REQUEST_DEATH_NOTIFICATION" :
3605                                         "BC_CLEAR_DEATH_NOTIFICATION",
3606                                         target);
3607                                 binder_proc_unlock(proc);
3608                                 kfree(death);
3609                                 break;
3610                         }
3611
3612                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
3613                                      "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n",
3614                                      proc->pid, thread->pid,
3615                                      cmd == BC_REQUEST_DEATH_NOTIFICATION ?
3616                                      "BC_REQUEST_DEATH_NOTIFICATION" :
3617                                      "BC_CLEAR_DEATH_NOTIFICATION",
3618                                      (u64)cookie, ref->data.debug_id,
3619                                      ref->data.desc, ref->data.strong,
3620                                      ref->data.weak, ref->node->debug_id);
3621
3622                         binder_node_lock(ref->node);
3623                         if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
3624                                 if (ref->death) {
3625                                         binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n",
3626                                                 proc->pid, thread->pid);
3627                                         binder_node_unlock(ref->node);
3628                                         binder_proc_unlock(proc);
3629                                         kfree(death);
3630                                         break;
3631                                 }
3632                                 binder_stats_created(BINDER_STAT_DEATH);
3633                                 INIT_LIST_HEAD(&death->work.entry);
3634                                 death->cookie = cookie;
3635                                 ref->death = death;
3636                                 if (ref->node->proc == NULL) {
3637                                         ref->death->work.type = BINDER_WORK_DEAD_BINDER;
3638
3639                                         binder_inner_proc_lock(proc);
3640                                         binder_enqueue_work_ilocked(
3641                                                 &ref->death->work, &proc->todo);
3642                                         binder_wakeup_proc_ilocked(proc);
3643                                         binder_inner_proc_unlock(proc);
3644                                 }
3645                         } else {
3646                                 if (ref->death == NULL) {
3647                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n",
3648                                                 proc->pid, thread->pid);
3649                                         binder_node_unlock(ref->node);
3650                                         binder_proc_unlock(proc);
3651                                         break;
3652                                 }
3653                                 death = ref->death;
3654                                 if (death->cookie != cookie) {
3655                                         binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n",
3656                                                 proc->pid, thread->pid,
3657                                                 (u64)death->cookie,
3658                                                 (u64)cookie);
3659                                         binder_node_unlock(ref->node);
3660                                         binder_proc_unlock(proc);
3661                                         break;
3662                                 }
3663                                 ref->death = NULL;
3664                                 binder_inner_proc_lock(proc);
3665                                 if (list_empty(&death->work.entry)) {
3666                                         death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3667                                         if (thread->looper &
3668                                             (BINDER_LOOPER_STATE_REGISTERED |
3669                                              BINDER_LOOPER_STATE_ENTERED))
3670                                                 binder_enqueue_thread_work_ilocked(
3671                                                                 thread,
3672                                                                 &death->work);
3673                                         else {
3674                                                 binder_enqueue_work_ilocked(
3675                                                                 &death->work,
3676                                                                 &proc->todo);
3677                                                 binder_wakeup_proc_ilocked(
3678                                                                 proc);
3679                                         }
3680                                 } else {
3681                                         BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER);
3682                                         death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR;
3683                                 }
3684                                 binder_inner_proc_unlock(proc);
3685                         }
3686                         binder_node_unlock(ref->node);
3687                         binder_proc_unlock(proc);
3688                 } break;
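		/*
		 * BC_DEAD_BINDER_DONE acknowledges a BR_DEAD_BINDER delivered
		 * earlier: the matching work item was parked on
		 * proc->delivered_death when the notification was read, and is
		 * looked up again by cookie below.
		 */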
3689                 case BC_DEAD_BINDER_DONE: {
3690                         struct binder_work *w;
3691                         binder_uintptr_t cookie;
3692                         struct binder_ref_death *death = NULL;
3693
3694                         if (get_user(cookie, (binder_uintptr_t __user *)ptr))
3695                                 return -EFAULT;
3696
3697                         ptr += sizeof(cookie);
3698                         binder_inner_proc_lock(proc);
3699                         list_for_each_entry(w, &proc->delivered_death,
3700                                             entry) {
3701                                 struct binder_ref_death *tmp_death =
3702                                         container_of(w,
3703                                                      struct binder_ref_death,
3704                                                      work);
3705
3706                                 if (tmp_death->cookie == cookie) {
3707                                         death = tmp_death;
3708                                         break;
3709                                 }
3710                         }
3711                         binder_debug(BINDER_DEBUG_DEAD_BINDER,
3712                                      "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n",
3713                                      proc->pid, thread->pid, (u64)cookie,
3714                                      death);
3715                         if (death == NULL) {
3716                                 binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n",
3717                                         proc->pid, thread->pid, (u64)cookie);
3718                                 binder_inner_proc_unlock(proc);
3719                                 break;
3720                         }
3721                         binder_dequeue_work_ilocked(&death->work);
3722                         if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) {
3723                                 death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION;
3724                                 if (thread->looper &
3725                                         (BINDER_LOOPER_STATE_REGISTERED |
3726                                          BINDER_LOOPER_STATE_ENTERED))
3727                                         binder_enqueue_thread_work_ilocked(
3728                                                 thread, &death->work);
3729                                 else {
3730                                         binder_enqueue_work_ilocked(
3731                                                         &death->work,
3732                                                         &proc->todo);
3733                                         binder_wakeup_proc_ilocked(proc);
3734                                 }
3735                         }
3736                         binder_inner_proc_unlock(proc);
3737                 } break;
3738
3739                 default:
3740                         pr_err("%d:%d unknown command %d\n",
3741                                proc->pid, thread->pid, cmd);
3742                         return -EINVAL;
3743                 }
3744                 *consumed = ptr - buffer;
3745         }
3746         return 0;
3747 }
3748
3749 static void binder_stat_br(struct binder_proc *proc,
3750                            struct binder_thread *thread, uint32_t cmd)
3751 {
3752         trace_binder_return(cmd);
3753         if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) {
3754                 atomic_inc(&binder_stats.br[_IOC_NR(cmd)]);
3755                 atomic_inc(&proc->stats.br[_IOC_NR(cmd)]);
3756                 atomic_inc(&thread->stats.br[_IOC_NR(cmd)]);
3757         }
3758 }
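/*
 * Note: binder_stat_br() buckets BR_* return codes by _IOC_NR() into the
 * global, per-proc and per-thread counters; commands whose number falls
 * outside the br[] array are silently left uncounted rather than treated
 * as an error.
 */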
3759
3760 static int binder_put_node_cmd(struct binder_proc *proc,
3761                                struct binder_thread *thread,
3762                                void __user **ptrp,
3763                                binder_uintptr_t node_ptr,
3764                                binder_uintptr_t node_cookie,
3765                                int node_debug_id,
3766                                uint32_t cmd, const char *cmd_name)
3767 {
3768         void __user *ptr = *ptrp;
3769
3770         if (put_user(cmd, (uint32_t __user *)ptr))
3771                 return -EFAULT;
3772         ptr += sizeof(uint32_t);
3773
3774         if (put_user(node_ptr, (binder_uintptr_t __user *)ptr))
3775                 return -EFAULT;
3776         ptr += sizeof(binder_uintptr_t);
3777
3778         if (put_user(node_cookie, (binder_uintptr_t __user *)ptr))
3779                 return -EFAULT;
3780         ptr += sizeof(binder_uintptr_t);
3781
3782         binder_stat_br(proc, thread, cmd);
3783         binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n",
3784                      proc->pid, thread->pid, cmd_name, node_debug_id,
3785                      (u64)node_ptr, (u64)node_cookie);
3786
3787         *ptrp = ptr;
3788         return 0;
3789 }
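/*
 * Layout note (illustrative, derived from the put_user() calls above):
 * each node command written by binder_put_node_cmd() occupies
 * sizeof(uint32_t) + 2 * sizeof(binder_uintptr_t) bytes of the read
 * buffer, e.g. BR_INCREFS immediately followed by the node's ptr and
 * cookie. A userspace reader would consume it roughly as:
 *
 *	uint32_t cmd = *(uint32_t *)p;			p += sizeof(cmd);
 *	binder_uintptr_t ptr = *(binder_uintptr_t *)p;	p += sizeof(ptr);
 *	binder_uintptr_t cookie = *(binder_uintptr_t *)p;
 */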
3790
3791 static int binder_wait_for_work(struct binder_thread *thread,
3792                                 bool do_proc_work)
3793 {
3794         DEFINE_WAIT(wait);
3795         struct binder_proc *proc = thread->proc;
3796         int ret = 0;
3797
3798         freezer_do_not_count();
3799         binder_inner_proc_lock(proc);
3800         for (;;) {
3801                 prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE);
3802                 if (binder_has_work_ilocked(thread, do_proc_work))
3803                         break;
3804                 if (do_proc_work)
3805                         list_add(&thread->waiting_thread_node,
3806                                  &proc->waiting_threads);
3807                 binder_inner_proc_unlock(proc);
3808                 schedule();
3809                 binder_inner_proc_lock(proc);
3810                 list_del_init(&thread->waiting_thread_node);
3811                 if (signal_pending(current)) {
3812                         ret = -EINTR;
3813                         break;
3814                 }
3815         }
3816         finish_wait(&thread->wait, &wait);
3817         binder_inner_proc_unlock(proc);
3818         freezer_count();
3819
3820         return ret;
3821 }
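/*
 * Wait protocol notes: freezer_do_not_count() lets the freezer skip this
 * task while it sleeps here; the thread parks itself on
 * proc->waiting_threads only when it is willing to handle process-wide
 * work; and signal_pending() is re-checked with the inner lock held
 * after every wakeup, so a signal interrupts the wait without losing
 * queued work.
 */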
3822
3823 /**
3824  * binder_apply_fd_fixups() - finish fd translation
3825  * @proc:         binder_proc associated with @t->buffer
3826  * @t:  binder transaction with list of fd fixups
3827  *
3828  * Now that we are in the context of the transaction target
3829  * process, we can allocate and install fds. Process the
3830  * list of fds to translate and fix up the buffer with the
3831  * new fds.
3832  *
3833  * If we fail to allocate an fd, then free the resources by
3834  * fput'ing files that have not been processed and closing (via
3835  * binder_deferred_fd_close()) any fds that have already been allocated.
3836  */
3837 static int binder_apply_fd_fixups(struct binder_proc *proc,
3838                                   struct binder_transaction *t)
3839 {
3840         struct binder_txn_fd_fixup *fixup, *tmp;
3841         int ret = 0;
3842
3843         list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) {
3844                 int fd = get_unused_fd_flags(O_CLOEXEC);
3845
3846                 if (fd < 0) {
3847                         binder_debug(BINDER_DEBUG_TRANSACTION,
3848                                      "failed fd fixup txn %d fd %d\n",
3849                                      t->debug_id, fd);
3850                         ret = -ENOMEM;
3851                         break;
3852                 }
3853                 binder_debug(BINDER_DEBUG_TRANSACTION,
3854                              "fd fixup txn %d fd %d\n",
3855                              t->debug_id, fd);
3856                 trace_binder_transaction_fd_recv(t, fd, fixup->offset);
3857                 fd_install(fd, fixup->file);
3858                 fixup->file = NULL;
3859                 if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer,
3860                                                 fixup->offset, &fd,
3861                                                 sizeof(u32))) {
3862                         ret = -EINVAL;
3863                         break;
3864                 }
3865         }
3866         list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
3867                 if (fixup->file) {
3868                         fput(fixup->file);
3869                 } else if (ret) {
3870                         u32 fd;
3871                         int err;
3872
3873                         err = binder_alloc_copy_from_buffer(&proc->alloc, &fd,
3874                                                             t->buffer,
3875                                                             fixup->offset,
3876                                                             sizeof(fd));
3877                         WARN_ON(err);
3878                         if (!err)
3879                                 binder_deferred_fd_close(fd);
3880                 }
3881                 list_del(&fixup->fixup_entry);
3882                 kfree(fixup);
3883         }
3884
3885         return ret;
3886 }
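/*
 * Note: once fd_install() has published an fd to the target process it
 * cannot simply be rolled back here; on error the cleanup pass above
 * reads the fd back out of the buffer and hands it to
 * binder_deferred_fd_close() instead.
 */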
3887
3888 static int binder_thread_read(struct binder_proc *proc,
3889                               struct binder_thread *thread,
3890                               binder_uintptr_t binder_buffer, size_t size,
3891                               binder_size_t *consumed, int non_block)
3892 {
3893         void __user *buffer = (void __user *)(uintptr_t)binder_buffer;
3894         void __user *ptr = buffer + *consumed;
3895         void __user *end = buffer + size;
3896
3897         int ret = 0;
3898         int wait_for_proc_work;
3899
3900         if (*consumed == 0) {
3901                 if (put_user(BR_NOOP, (uint32_t __user *)ptr))
3902                         return -EFAULT;
3903                 ptr += sizeof(uint32_t);
3904         }
3905
3906 retry:
3907         binder_inner_proc_lock(proc);
3908         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
3909         binder_inner_proc_unlock(proc);
3910
3911         thread->looper |= BINDER_LOOPER_STATE_WAITING;
3912
3913         trace_binder_wait_for_work(wait_for_proc_work,
3914                                    !!thread->transaction_stack,
3915                                    !binder_worklist_empty(proc, &thread->todo));
3916         if (wait_for_proc_work) {
3917                 if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
3918                                         BINDER_LOOPER_STATE_ENTERED))) {
3919                         binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n",
3920                                 proc->pid, thread->pid, thread->looper);
3921                         wait_event_interruptible(binder_user_error_wait,
3922                                                  binder_stop_on_user_error < 2);
3923                 }
3924                 binder_set_nice(proc->default_priority);
3925         }
3926
3927         if (non_block) {
3928                 if (!binder_has_work(thread, wait_for_proc_work))
3929                         ret = -EAGAIN;
3930         } else {
3931                 ret = binder_wait_for_work(thread, wait_for_proc_work);
3932         }
3933
3934         thread->looper &= ~BINDER_LOOPER_STATE_WAITING;
3935
3936         if (ret)
3937                 return ret;
3938
3939         while (1) {
3940                 uint32_t cmd;
3941                 struct binder_transaction_data_secctx tr;
3942                 struct binder_transaction_data *trd = &tr.transaction_data;
3943                 struct binder_work *w = NULL;
3944                 struct list_head *list = NULL;
3945                 struct binder_transaction *t = NULL;
3946                 struct binder_thread *t_from;
3947                 size_t trsize = sizeof(*trd);
3948
3949                 binder_inner_proc_lock(proc);
3950                 if (!binder_worklist_empty_ilocked(&thread->todo))
3951                         list = &thread->todo;
3952                 else if (!binder_worklist_empty_ilocked(&proc->todo) &&
3953                            wait_for_proc_work)
3954                         list = &proc->todo;
3955                 else {
3956                         binder_inner_proc_unlock(proc);
3957
3958                         /* no data added beyond the initial BR_NOOP (4 bytes) */
3959                         if (ptr - buffer == 4 && !thread->looper_need_return)
3960                                 goto retry;
3961                         break;
3962                 }
3963
3964                 if (end - ptr < sizeof(tr) + 4) {
3965                         binder_inner_proc_unlock(proc);
3966                         break;
3967                 }
3968                 w = binder_dequeue_work_head_ilocked(list);
3969                 if (binder_worklist_empty_ilocked(&thread->todo))
3970                         thread->process_todo = false;
3971
3972                 switch (w->type) {
3973                 case BINDER_WORK_TRANSACTION: {
3974                         binder_inner_proc_unlock(proc);
3975                         t = container_of(w, struct binder_transaction, work);
3976                 } break;
3977                 case BINDER_WORK_RETURN_ERROR: {
3978                         struct binder_error *e = container_of(
3979                                         w, struct binder_error, work);
3980
3981                         WARN_ON(e->cmd == BR_OK);
3982                         binder_inner_proc_unlock(proc);
3983                         if (put_user(e->cmd, (uint32_t __user *)ptr))
3984                                 return -EFAULT;
3985                         cmd = e->cmd;
3986                         e->cmd = BR_OK;
3987                         ptr += sizeof(uint32_t);
3988
3989                         binder_stat_br(proc, thread, cmd);
3990                 } break;
3991                 case BINDER_WORK_TRANSACTION_COMPLETE:
3992                 case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: {
3993                         if (proc->oneway_spam_detection_enabled &&
3994                                    w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT)
3995                                 cmd = BR_ONEWAY_SPAM_SUSPECT;
3996                         else
3997                                 cmd = BR_TRANSACTION_COMPLETE;
3998                         binder_inner_proc_unlock(proc);
3999                         kfree(w);
4000                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4001                         if (put_user(cmd, (uint32_t __user *)ptr))
4002                                 return -EFAULT;
4003                         ptr += sizeof(uint32_t);
4004
4005                         binder_stat_br(proc, thread, cmd);
4006                         binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE,
4007                                      "%d:%d BR_TRANSACTION_COMPLETE\n",
4008                                      proc->pid, thread->pid);
4009                 } break;
4010                 case BINDER_WORK_NODE: {
4011                         struct binder_node *node = container_of(w, struct binder_node, work);
4012                         int strong, weak;
4013                         binder_uintptr_t node_ptr = node->ptr;
4014                         binder_uintptr_t node_cookie = node->cookie;
4015                         int node_debug_id = node->debug_id;
4016                         int has_weak_ref;
4017                         int has_strong_ref;
4018                         void __user *orig_ptr = ptr;
4019
4020                         BUG_ON(proc != node->proc);
4021                         strong = node->internal_strong_refs ||
4022                                         node->local_strong_refs;
4023                         weak = !hlist_empty(&node->refs) ||
4024                                         node->local_weak_refs ||
4025                                         node->tmp_refs || strong;
4026                         has_strong_ref = node->has_strong_ref;
4027                         has_weak_ref = node->has_weak_ref;
4028
4029                         if (weak && !has_weak_ref) {
4030                                 node->has_weak_ref = 1;
4031                                 node->pending_weak_ref = 1;
4032                                 node->local_weak_refs++;
4033                         }
4034                         if (strong && !has_strong_ref) {
4035                                 node->has_strong_ref = 1;
4036                                 node->pending_strong_ref = 1;
4037                                 node->local_strong_refs++;
4038                         }
4039                         if (!strong && has_strong_ref)
4040                                 node->has_strong_ref = 0;
4041                         if (!weak && has_weak_ref)
4042                                 node->has_weak_ref = 0;
4043                         if (!weak && !strong) {
4044                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4045                                              "%d:%d node %d u%016llx c%016llx deleted\n",
4046                                              proc->pid, thread->pid,
4047                                              node_debug_id,
4048                                              (u64)node_ptr,
4049                                              (u64)node_cookie);
4050                                 rb_erase(&node->rb_node, &proc->nodes);
4051                                 binder_inner_proc_unlock(proc);
4052                                 binder_node_lock(node);
4053                                 /*
4054                                  * Acquire the node lock before freeing the
4055                                  * node to serialize with other threads that
4056                                  * may have been holding the node lock while
4057                                  * decrementing this node (avoids race where
4058                                  * this thread frees while the other thread
4059                                  * is unlocking the node after the final
4060                                  * decrement)
4061                                  */
4062                                 binder_node_unlock(node);
4063                                 binder_free_node(node);
4064                         } else
4065                                 binder_inner_proc_unlock(proc);
4066
4067                         if (weak && !has_weak_ref)
4068                                 ret = binder_put_node_cmd(
4069                                                 proc, thread, &ptr, node_ptr,
4070                                                 node_cookie, node_debug_id,
4071                                                 BR_INCREFS, "BR_INCREFS");
4072                         if (!ret && strong && !has_strong_ref)
4073                                 ret = binder_put_node_cmd(
4074                                                 proc, thread, &ptr, node_ptr,
4075                                                 node_cookie, node_debug_id,
4076                                                 BR_ACQUIRE, "BR_ACQUIRE");
4077                         if (!ret && !strong && has_strong_ref)
4078                                 ret = binder_put_node_cmd(
4079                                                 proc, thread, &ptr, node_ptr,
4080                                                 node_cookie, node_debug_id,
4081                                                 BR_RELEASE, "BR_RELEASE");
4082                         if (!ret && !weak && has_weak_ref)
4083                                 ret = binder_put_node_cmd(
4084                                                 proc, thread, &ptr, node_ptr,
4085                                                 node_cookie, node_debug_id,
4086                                                 BR_DECREFS, "BR_DECREFS");
4087                         if (orig_ptr == ptr)
4088                                 binder_debug(BINDER_DEBUG_INTERNAL_REFS,
4089                                              "%d:%d node %d u%016llx c%016llx state unchanged\n",
4090                                              proc->pid, thread->pid,
4091                                              node_debug_id,
4092                                              (u64)node_ptr,
4093                                              (u64)node_cookie);
4094                         if (ret)
4095                                 return ret;
4096                 } break;
4097                 case BINDER_WORK_DEAD_BINDER:
4098                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4099                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4100                         struct binder_ref_death *death;
4101                         uint32_t cmd;
4102                         binder_uintptr_t cookie;
4103
4104                         death = container_of(w, struct binder_ref_death, work);
4105                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION)
4106                                 cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE;
4107                         else
4108                                 cmd = BR_DEAD_BINDER;
4109                         cookie = death->cookie;
4110
4111                         binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
4112                                      "%d:%d %s %016llx\n",
4113                                       proc->pid, thread->pid,
4114                                       cmd == BR_DEAD_BINDER ?
4115                                       "BR_DEAD_BINDER" :
4116                                       "BR_CLEAR_DEATH_NOTIFICATION_DONE",
4117                                       (u64)cookie);
4118                         if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
4119                                 binder_inner_proc_unlock(proc);
4120                                 kfree(death);
4121                                 binder_stats_deleted(BINDER_STAT_DEATH);
4122                         } else {
4123                                 binder_enqueue_work_ilocked(
4124                                                 w, &proc->delivered_death);
4125                                 binder_inner_proc_unlock(proc);
4126                         }
4127                         if (put_user(cmd, (uint32_t __user *)ptr))
4128                                 return -EFAULT;
4129                         ptr += sizeof(uint32_t);
4130                         if (put_user(cookie,
4131                                      (binder_uintptr_t __user *)ptr))
4132                                 return -EFAULT;
4133                         ptr += sizeof(binder_uintptr_t);
4134                         binder_stat_br(proc, thread, cmd);
4135                         if (cmd == BR_DEAD_BINDER)
4136                                 goto done; /* DEAD_BINDER notifications can cause transactions */
4137                 } break;
4138                 default:
4139                         binder_inner_proc_unlock(proc);
4140                         pr_err("%d:%d: bad work type %d\n",
4141                                proc->pid, thread->pid, w->type);
4142                         break;
4143                 }
4144
4145                 if (!t)
4146                         continue;
4147
4148                 BUG_ON(t->buffer == NULL);
4149                 if (t->buffer->target_node) {
4150                         struct binder_node *target_node = t->buffer->target_node;
4151
4152                         trd->target.ptr = target_node->ptr;
4153                         trd->cookie = target_node->cookie;
4154                         t->saved_priority = task_nice(current);
4155                         if (t->priority < target_node->min_priority &&
4156                             !(t->flags & TF_ONE_WAY))
4157                                 binder_set_nice(t->priority);
4158                         else if (!(t->flags & TF_ONE_WAY) ||
4159                                  t->saved_priority > target_node->min_priority)
4160                                 binder_set_nice(target_node->min_priority);
4161                         cmd = BR_TRANSACTION;
4162                 } else {
4163                         trd->target.ptr = 0;
4164                         trd->cookie = 0;
4165                         cmd = BR_REPLY;
4166                 }
4167                 trd->code = t->code;
4168                 trd->flags = t->flags;
4169                 trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid);
4170
4171                 t_from = binder_get_txn_from(t);
4172                 if (t_from) {
4173                         struct task_struct *sender = t_from->proc->tsk;
4174
4175                         trd->sender_pid =
4176                                 task_tgid_nr_ns(sender,
4177                                                 task_active_pid_ns(current));
4178                 } else {
4179                         trd->sender_pid = 0;
4180                 }
4181
4182                 ret = binder_apply_fd_fixups(proc, t);
4183                 if (ret) {
4184                         struct binder_buffer *buffer = t->buffer;
4185                         bool oneway = !!(t->flags & TF_ONE_WAY);
4186                         int tid = t->debug_id;
4187
4188                         if (t_from)
4189                                 binder_thread_dec_tmpref(t_from);
4190                         buffer->transaction = NULL;
4191                         binder_cleanup_transaction(t, "fd fixups failed",
4192                                                    BR_FAILED_REPLY);
4193                         binder_free_buf(proc, thread, buffer, true);
4194                         binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
4195                                      "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n",
4196                                      proc->pid, thread->pid,
4197                                      oneway ? "async " :
4198                                         (cmd == BR_REPLY ? "reply " : ""),
4199                                      tid, BR_FAILED_REPLY, ret, __LINE__);
4200                         if (cmd == BR_REPLY) {
4201                                 cmd = BR_FAILED_REPLY;
4202                                 if (put_user(cmd, (uint32_t __user *)ptr))
4203                                         return -EFAULT;
4204                                 ptr += sizeof(uint32_t);
4205                                 binder_stat_br(proc, thread, cmd);
4206                                 break;
4207                         }
4208                         continue;
4209                 }
4210                 trd->data_size = t->buffer->data_size;
4211                 trd->offsets_size = t->buffer->offsets_size;
4212                 trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data;
4213                 trd->data.ptr.offsets = trd->data.ptr.buffer +
4214                                         ALIGN(t->buffer->data_size,
4215                                             sizeof(void *));
4216
4217                 tr.secctx = t->security_ctx;
4218                 if (t->security_ctx) {
4219                         cmd = BR_TRANSACTION_SEC_CTX;
4220                         trsize = sizeof(tr);
4221                 }
4222                 if (put_user(cmd, (uint32_t __user *)ptr)) {
4223                         if (t_from)
4224                                 binder_thread_dec_tmpref(t_from);
4225
4226                         binder_cleanup_transaction(t, "put_user failed",
4227                                                    BR_FAILED_REPLY);
4228
4229                         return -EFAULT;
4230                 }
4231                 ptr += sizeof(uint32_t);
4232                 if (copy_to_user(ptr, &tr, trsize)) {
4233                         if (t_from)
4234                                 binder_thread_dec_tmpref(t_from);
4235
4236                         binder_cleanup_transaction(t, "copy_to_user failed",
4237                                                    BR_FAILED_REPLY);
4238
4239                         return -EFAULT;
4240                 }
4241                 ptr += trsize;
4242
4243                 trace_binder_transaction_received(t);
4244                 binder_stat_br(proc, thread, cmd);
4245                 binder_debug(BINDER_DEBUG_TRANSACTION,
4246                              "%d:%d %s %d %d:%d, cmd %d size %zd-%zd ptr %016llx-%016llx\n",
4247                              proc->pid, thread->pid,
4248                              (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" :
4249                                 (cmd == BR_TRANSACTION_SEC_CTX) ?
4250                                      "BR_TRANSACTION_SEC_CTX" : "BR_REPLY",
4251                              t->debug_id, t_from ? t_from->proc->pid : 0,
4252                              t_from ? t_from->pid : 0, cmd,
4253                              t->buffer->data_size, t->buffer->offsets_size,
4254                              (u64)trd->data.ptr.buffer,
4255                              (u64)trd->data.ptr.offsets);
4256
4257                 if (t_from)
4258                         binder_thread_dec_tmpref(t_from);
4259                 t->buffer->allow_user_free = 1;
4260                 if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) {
4261                         binder_inner_proc_lock(thread->proc);
4262                         t->to_parent = thread->transaction_stack;
4263                         t->to_thread = thread;
4264                         thread->transaction_stack = t;
4265                         binder_inner_proc_unlock(thread->proc);
4266                 } else {
4267                         binder_free_transaction(t);
4268                 }
4269                 break;
4270         }
4271
4272 done:
4273
4274         *consumed = ptr - buffer;
4275         binder_inner_proc_lock(proc);
4276         if (proc->requested_threads == 0 &&
4277             list_empty(&thread->proc->waiting_threads) &&
4278             proc->requested_threads_started < proc->max_threads &&
4279             (thread->looper & (BINDER_LOOPER_STATE_REGISTERED |
4280              BINDER_LOOPER_STATE_ENTERED))
4281             /* the user-space code fails to spawn a new thread if we leave this out */) {
4282                 proc->requested_threads++;
4283                 binder_inner_proc_unlock(proc);
4284                 binder_debug(BINDER_DEBUG_THREADS,
4285                              "%d:%d BR_SPAWN_LOOPER\n",
4286                              proc->pid, thread->pid);
4287                 if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
4288                         return -EFAULT;
4289                 binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
4290         } else
4291                 binder_inner_proc_unlock(proc);
4292         return 0;
4293 }
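/*
 * Illustrative userspace read loop (a sketch, not part of this driver;
 * assumes `fd` is an open binder device descriptor):
 *
 *	struct binder_write_read bwr = {0};
 *	char rbuf[256];
 *
 *	bwr.read_buffer = (binder_uintptr_t)rbuf;
 *	bwr.read_size = sizeof(rbuf);
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) == 0) {
 *		char *p = rbuf;
 *		char *end = rbuf + bwr.read_consumed;
 *
 *		while (p < end) {
 *			uint32_t cmd = *(uint32_t *)p;
 *
 *			p += sizeof(uint32_t);
 *			... handle cmd and skip its payload here; BR_NOOP
 *			    and BR_SPAWN_LOOPER carry no payload ...
 *		}
 *	}
 */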
4294
4295 static void binder_release_work(struct binder_proc *proc,
4296                                 struct list_head *list)
4297 {
4298         struct binder_work *w;
4299         enum binder_work_type wtype;
4300
4301         while (1) {
4302                 binder_inner_proc_lock(proc);
4303                 w = binder_dequeue_work_head_ilocked(list);
4304                 wtype = w ? w->type : 0;
4305                 binder_inner_proc_unlock(proc);
4306                 if (!w)
4307                         return;
4308
4309                 switch (wtype) {
4310                 case BINDER_WORK_TRANSACTION: {
4311                         struct binder_transaction *t;
4312
4313                         t = container_of(w, struct binder_transaction, work);
4314
4315                         binder_cleanup_transaction(t, "process died.",
4316                                                    BR_DEAD_REPLY);
4317                 } break;
4318                 case BINDER_WORK_RETURN_ERROR: {
4319                         struct binder_error *e = container_of(
4320                                         w, struct binder_error, work);
4321
4322                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4323                                 "undelivered TRANSACTION_ERROR: %u\n",
4324                                 e->cmd);
4325                 } break;
4326                 case BINDER_WORK_TRANSACTION_COMPLETE: {
4327                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4328                                 "undelivered TRANSACTION_COMPLETE\n");
4329                         kfree(w);
4330                         binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
4331                 } break;
4332                 case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
4333                 case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: {
4334                         struct binder_ref_death *death;
4335
4336                         death = container_of(w, struct binder_ref_death, work);
4337                         binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4338                                 "undelivered death notification, %016llx\n",
4339                                 (u64)death->cookie);
4340                         kfree(death);
4341                         binder_stats_deleted(BINDER_STAT_DEATH);
4342                 } break;
4343                 case BINDER_WORK_NODE:
4344                         break;
4345                 default:
4346                         pr_err("unexpected work type, %d, not freed\n",
4347                                wtype);
4348                         break;
4349                 }
4350         }
4352 }
4353
4354 static struct binder_thread *binder_get_thread_ilocked(
4355                 struct binder_proc *proc, struct binder_thread *new_thread)
4356 {
4357         struct binder_thread *thread = NULL;
4358         struct rb_node *parent = NULL;
4359         struct rb_node **p = &proc->threads.rb_node;
4360
4361         while (*p) {
4362                 parent = *p;
4363                 thread = rb_entry(parent, struct binder_thread, rb_node);
4364
4365                 if (current->pid < thread->pid)
4366                         p = &(*p)->rb_left;
4367                 else if (current->pid > thread->pid)
4368                         p = &(*p)->rb_right;
4369                 else
4370                         return thread;
4371         }
4372         if (!new_thread)
4373                 return NULL;
4374         thread = new_thread;
4375         binder_stats_created(BINDER_STAT_THREAD);
4376         thread->proc = proc;
4377         thread->pid = current->pid;
4378         atomic_set(&thread->tmp_ref, 0);
4379         init_waitqueue_head(&thread->wait);
4380         INIT_LIST_HEAD(&thread->todo);
4381         rb_link_node(&thread->rb_node, parent, p);
4382         rb_insert_color(&thread->rb_node, &proc->threads);
4383         thread->looper_need_return = true;
4384         thread->return_error.work.type = BINDER_WORK_RETURN_ERROR;
4385         thread->return_error.cmd = BR_OK;
4386         thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR;
4387         thread->reply_error.cmd = BR_OK;
4388         INIT_LIST_HEAD(&new_thread->waiting_thread_node);
4389         return thread;
4390 }
4391
4392 static struct binder_thread *binder_get_thread(struct binder_proc *proc)
4393 {
4394         struct binder_thread *thread;
4395         struct binder_thread *new_thread;
4396
4397         binder_inner_proc_lock(proc);
4398         thread = binder_get_thread_ilocked(proc, NULL);
4399         binder_inner_proc_unlock(proc);
4400         if (!thread) {
4401                 new_thread = kzalloc(sizeof(*thread), GFP_KERNEL);
4402                 if (new_thread == NULL)
4403                         return NULL;
4404                 binder_inner_proc_lock(proc);
4405                 thread = binder_get_thread_ilocked(proc, new_thread);
4406                 binder_inner_proc_unlock(proc);
4407                 if (thread != new_thread)
4408                         kfree(new_thread);
4409         }
4410         return thread;
4411 }
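/*
 * The lookup/allocate/retry dance in binder_get_thread() exists because
 * kzalloc(GFP_KERNEL) may sleep and so cannot run under the inner
 * spinlock: look up first, allocate unlocked on a miss, then retry the
 * insert and free the new allocation if another thread won the race.
 */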
4412
4413 static void binder_free_proc(struct binder_proc *proc)
4414 {
4415         struct binder_device *device;
4416
4417         BUG_ON(!list_empty(&proc->todo));
4418         BUG_ON(!list_empty(&proc->delivered_death));
4419         if (proc->outstanding_txns)
4420                 pr_warn("%s: Unexpected outstanding_txns %d\n",
4421                         __func__, proc->outstanding_txns);
4422         device = container_of(proc->context, struct binder_device, context);
4423         if (refcount_dec_and_test(&device->ref)) {
4424                 kfree(proc->context->name);
4425                 kfree(device);
4426         }
4427         binder_alloc_deferred_release(&proc->alloc);
4428         put_task_struct(proc->tsk);
4429         put_cred(proc->cred);
4430         binder_stats_deleted(BINDER_STAT_PROC);
4431         kfree(proc);
4432 }
4433
4434 static void binder_free_thread(struct binder_thread *thread)
4435 {
4436         BUG_ON(!list_empty(&thread->todo));
4437         binder_stats_deleted(BINDER_STAT_THREAD);
4438         binder_proc_dec_tmpref(thread->proc);
4439         kfree(thread);
4440 }
4441
4442 static int binder_thread_release(struct binder_proc *proc,
4443                                  struct binder_thread *thread)
4444 {
4445         struct binder_transaction *t;
4446         struct binder_transaction *send_reply = NULL;
4447         int active_transactions = 0;
4448         struct binder_transaction *last_t = NULL;
4449
4450         binder_inner_proc_lock(thread->proc);
4451         /*
4452          * take a ref on the proc so it survives
4453          * after we remove this thread from proc->threads.
4454          * The corresponding decrement happens in
4455          * binder_free_thread() when the thread is actually freed.
4456          */
4457         proc->tmp_ref++;
4458         /*
4459          * take a ref on this thread to ensure it
4460          * survives while we are releasing it
4461          */
4462         atomic_inc(&thread->tmp_ref);
4463         rb_erase(&thread->rb_node, &proc->threads);
4464         t = thread->transaction_stack;
4465         if (t) {
4466                 spin_lock(&t->lock);
4467                 if (t->to_thread == thread)
4468                         send_reply = t;
4469         } else {
4470                 __acquire(&t->lock);
4471         }
4472         thread->is_dead = true;
4473
4474         while (t) {
4475                 last_t = t;
4476                 active_transactions++;
4477                 binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
4478                              "release %d:%d transaction %d %s, still active\n",
4479                               proc->pid, thread->pid,
4480                              t->debug_id,
4481                              (t->to_thread == thread) ? "in" : "out");
4482
4483                 if (t->to_thread == thread) {
4484                         thread->proc->outstanding_txns--;
4485                         t->to_proc = NULL;
4486                         t->to_thread = NULL;
4487                         if (t->buffer) {
4488                                 t->buffer->transaction = NULL;
4489                                 t->buffer = NULL;
4490                         }
4491                         t = t->to_parent;
4492                 } else if (t->from == thread) {
4493                         t->from = NULL;
4494                         t = t->from_parent;
4495                 } else
4496                         BUG();
4497                 spin_unlock(&last_t->lock);
4498                 if (t)
4499                         spin_lock(&t->lock);
4500                 else
4501                         __acquire(&t->lock);
4502         }
4503         /* annotation for sparse, lock not acquired in last iteration above */
4504         __release(&t->lock);
4505
4506         /*
4507          * If this thread used poll, make sure we remove the waitqueue from any
4508          * poll data structures holding it.
4509          */
4510         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4511                 wake_up_pollfree(&thread->wait);
4512
4513         binder_inner_proc_unlock(thread->proc);
4514
4515         /*
4516          * This is needed to avoid races between wake_up_pollfree() above and
4517          * someone else removing the last entry from the queue for other reasons
4518          * (e.g. ep_remove_wait_queue() being called due to an epoll file
4519          * descriptor being closed).  Such other users hold an RCU read lock, so
4520          * we can be sure they're done after we call synchronize_rcu().
4521          */
4522         if (thread->looper & BINDER_LOOPER_STATE_POLL)
4523                 synchronize_rcu();
4524
4525         if (send_reply)
4526                 binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
4527         binder_release_work(proc, &thread->todo);
4528         binder_thread_dec_tmpref(thread);
4529         return active_transactions;
4530 }
4531
4532 static __poll_t binder_poll(struct file *filp,
4533                                 struct poll_table_struct *wait)
4534 {
4535         struct binder_proc *proc = filp->private_data;
4536         struct binder_thread *thread = NULL;
4537         bool wait_for_proc_work;
4538
4539         thread = binder_get_thread(proc);
4540         if (!thread)
4541                 return POLLERR;
4542
4543         binder_inner_proc_lock(thread->proc);
4544         thread->looper |= BINDER_LOOPER_STATE_POLL;
4545         wait_for_proc_work = binder_available_for_proc_work_ilocked(thread);
4546
4547         binder_inner_proc_unlock(thread->proc);
4548
4549         poll_wait(filp, &thread->wait, wait);
4550
4551         if (binder_has_work(thread, wait_for_proc_work))
4552                 return EPOLLIN;
4553
4554         return 0;
4555 }
4556
4557 static int binder_ioctl_write_read(struct file *filp,
4558                                 unsigned int cmd, unsigned long arg,
4559                                 struct binder_thread *thread)
4560 {
4561         int ret = 0;
4562         struct binder_proc *proc = filp->private_data;
4563         unsigned int size = _IOC_SIZE(cmd);
4564         void __user *ubuf = (void __user *)arg;
4565         struct binder_write_read bwr;
4566
4567         if (size != sizeof(struct binder_write_read)) {
4568                 ret = -EINVAL;
4569                 goto out;
4570         }
4571         if (copy_from_user(&bwr, ubuf, sizeof(bwr))) {
4572                 ret = -EFAULT;
4573                 goto out;
4574         }
4575         binder_debug(BINDER_DEBUG_READ_WRITE,
4576                      "%d:%d write %lld at %016llx, read %lld at %016llx\n",
4577                      proc->pid, thread->pid,
4578                      (u64)bwr.write_size, (u64)bwr.write_buffer,
4579                      (u64)bwr.read_size, (u64)bwr.read_buffer);
4580
4581         if (bwr.write_size > 0) {
4582                 ret = binder_thread_write(proc, thread,
4583                                           bwr.write_buffer,
4584                                           bwr.write_size,
4585                                           &bwr.write_consumed);
4586                 trace_binder_write_done(ret);
4587                 if (ret < 0) {
4588                         bwr.read_consumed = 0;
4589                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4590                                 ret = -EFAULT;
4591                         goto out;
4592                 }
4593         }
4594         if (bwr.read_size > 0) {
4595                 ret = binder_thread_read(proc, thread, bwr.read_buffer,
4596                                          bwr.read_size,
4597                                          &bwr.read_consumed,
4598                                          filp->f_flags & O_NONBLOCK);
4599                 trace_binder_read_done(ret);
4600                 binder_inner_proc_lock(proc);
4601                 if (!binder_worklist_empty_ilocked(&proc->todo))
4602                         binder_wakeup_proc_ilocked(proc);
4603                 binder_inner_proc_unlock(proc);
4604                 if (ret < 0) {
4605                         if (copy_to_user(ubuf, &bwr, sizeof(bwr)))
4606                                 ret = -EFAULT;
4607                         goto out;
4608                 }
4609         }
4610         binder_debug(BINDER_DEBUG_READ_WRITE,
4611                      "%d:%d wrote %lld of %lld, read return %lld of %lld\n",
4612                      proc->pid, thread->pid,
4613                      (u64)bwr.write_consumed, (u64)bwr.write_size,
4614                      (u64)bwr.read_consumed, (u64)bwr.read_size);
4615         if (copy_to_user(ubuf, &bwr, sizeof(bwr))) {
4616                 ret = -EFAULT;
4617                 goto out;
4618         }
4619 out:
4620         return ret;
4621 }
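/*
 * Illustrative userspace sketch (not part of this driver; assumes `fd`
 * is an open binder device descriptor and <linux/android/binder.h> and
 * <sys/ioctl.h> are included):
 *
 *	struct binder_write_read bwr = {0};
 *	uint32_t wbuf[] = { BC_ENTER_LOOPER };
 *
 *	bwr.write_buffer = (binder_uintptr_t)wbuf;
 *	bwr.write_size = sizeof(wbuf);
 *	if (ioctl(fd, BINDER_WRITE_READ, &bwr) < 0)
 *		perror("BINDER_WRITE_READ");
 *	... bwr.write_consumed reports how much of wbuf was processed ...
 */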
4622
4623 static int binder_ioctl_set_ctx_mgr(struct file *filp,
4624                                     struct flat_binder_object *fbo)
4625 {
4626         int ret = 0;
4627         struct binder_proc *proc = filp->private_data;
4628         struct binder_context *context = proc->context;
4629         struct binder_node *new_node;
4630         kuid_t curr_euid = current_euid();
4631
4632         mutex_lock(&context->context_mgr_node_lock);
4633         if (context->binder_context_mgr_node) {
4634                 pr_err("BINDER_SET_CONTEXT_MGR already set\n");
4635                 ret = -EBUSY;
4636                 goto out;
4637         }
4638         ret = security_binder_set_context_mgr(proc->cred);
4639         if (ret < 0)
4640                 goto out;
4641         if (uid_valid(context->binder_context_mgr_uid)) {
4642                 if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) {
4643                         pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n",
4644                                from_kuid(&init_user_ns, curr_euid),
4645                                from_kuid(&init_user_ns,
4646                                          context->binder_context_mgr_uid));
4647                         ret = -EPERM;
4648                         goto out;
4649                 }
4650         } else {
4651                 context->binder_context_mgr_uid = curr_euid;
4652         }
4653         new_node = binder_new_node(proc, fbo);
4654         if (!new_node) {
4655                 ret = -ENOMEM;
4656                 goto out;
4657         }
4658         binder_node_lock(new_node);
4659         new_node->local_weak_refs++;
4660         new_node->local_strong_refs++;
4661         new_node->has_strong_ref = 1;
4662         new_node->has_weak_ref = 1;
4663         context->binder_context_mgr_node = new_node;
4664         binder_node_unlock(new_node);
4665         binder_put_node(new_node);
4666 out:
4667         mutex_unlock(&context->context_mgr_node_lock);
4668         return ret;
4669 }
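/*
 * Illustrative sketch (userspace, not kernel code): a service manager
 * typically claims the context-manager role right after opening the
 * device, before any other process does:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 *	if (fd >= 0 && ioctl(fd, BINDER_SET_CONTEXT_MGR, 0) < 0)
 *		perror("BINDER_SET_CONTEXT_MGR");
 */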
4670
4671 static int binder_ioctl_get_node_info_for_ref(struct binder_proc *proc,
4672                 struct binder_node_info_for_ref *info)
4673 {
4674         struct binder_node *node;
4675         struct binder_context *context = proc->context;
4676         __u32 handle = info->handle;
4677
4678         if (info->strong_count || info->weak_count || info->reserved1 ||
4679             info->reserved2 || info->reserved3) {
4680                 binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.",
4681                                   proc->pid);
4682                 return -EINVAL;
4683         }
4684
4685         /* This ioctl may only be used by the context manager */
4686         mutex_lock(&context->context_mgr_node_lock);
4687         if (!context->binder_context_mgr_node ||
4688                 context->binder_context_mgr_node->proc != proc) {
4689                 mutex_unlock(&context->context_mgr_node_lock);
4690                 return -EPERM;
4691         }
4692         mutex_unlock(&context->context_mgr_node_lock);
4693
4694         node = binder_get_node_from_ref(proc, handle, true, NULL);
4695         if (!node)
4696                 return -EINVAL;
4697
4698         info->strong_count = node->local_strong_refs +
4699                 node->internal_strong_refs;
4700         info->weak_count = node->local_weak_refs;
4701
4702         binder_put_node(node);
4703
4704         return 0;
4705 }
4706
4707 static int binder_ioctl_get_node_debug_info(struct binder_proc *proc,
4708                                 struct binder_node_debug_info *info)
4709 {
4710         struct rb_node *n;
4711         binder_uintptr_t ptr = info->ptr;
4712
4713         memset(info, 0, sizeof(*info));
4714
4715         binder_inner_proc_lock(proc);
4716         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
4717                 struct binder_node *node = rb_entry(n, struct binder_node,
4718                                                     rb_node);
4719                 if (node->ptr > ptr) {
4720                         info->ptr = node->ptr;
4721                         info->cookie = node->cookie;
4722                         info->has_strong_ref = node->has_strong_ref;
4723                         info->has_weak_ref = node->has_weak_ref;
4724                         break;
4725                 }
4726         }
4727         binder_inner_proc_unlock(proc);
4728
4729         return 0;
4730 }
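/*
 * Illustrative sketch: BINDER_GET_NODE_DEBUG_INFO behaves as an iterator.
 * Userspace starts with info.ptr == 0 and feeds the returned ptr back in
 * until it reads 0 again (no node with a greater address exists):
 *
 *	struct binder_node_debug_info info = {0};
 *
 *	do {
 *		if (ioctl(fd, BINDER_GET_NODE_DEBUG_INFO, &info) < 0)
 *			break;
 *	} while (info.ptr != 0);
 */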
4731
4732 static bool binder_txns_pending_ilocked(struct binder_proc *proc)
4733 {
4734         struct rb_node *n;
4735         struct binder_thread *thread;
4736
4737         if (proc->outstanding_txns > 0)
4738                 return true;
4739
4740         for (n = rb_first(&proc->threads); n; n = rb_next(n)) {
4741                 thread = rb_entry(n, struct binder_thread, rb_node);
4742                 if (thread->transaction_stack)
4743                         return true;
4744         }
4745         return false;
4746 }
4747
4748 static int binder_ioctl_freeze(struct binder_freeze_info *info,
4749                                struct binder_proc *target_proc)
4750 {
4751         int ret = 0;
4752
4753         if (!info->enable) {
4754                 binder_inner_proc_lock(target_proc);
4755                 target_proc->sync_recv = false;
4756                 target_proc->async_recv = false;
4757                 target_proc->is_frozen = false;
4758                 binder_inner_proc_unlock(target_proc);
4759                 return 0;
4760         }
4761
4762         /*
4763          * Freezing the target. Prevent new transactions by
4764          * setting frozen state. If timeout specified, wait
4765          * for transactions to drain.
4766          */
4767         binder_inner_proc_lock(target_proc);
4768         target_proc->sync_recv = false;
4769         target_proc->async_recv = false;
4770         target_proc->is_frozen = true;
4771         binder_inner_proc_unlock(target_proc);
4772
4773         if (info->timeout_ms > 0)
4774                 ret = wait_event_interruptible_timeout(
4775                         target_proc->freeze_wait,
4776                         (!target_proc->outstanding_txns),
4777                         msecs_to_jiffies(info->timeout_ms));
4778
4779         /* Check pending transactions that wait for reply */
4780         if (ret >= 0) {
4781                 binder_inner_proc_lock(target_proc);
4782                 if (binder_txns_pending_ilocked(target_proc))
4783                         ret = -EAGAIN;
4784                 binder_inner_proc_unlock(target_proc);
4785         }
4786
4787         if (ret < 0) {
4788                 binder_inner_proc_lock(target_proc);
4789                 target_proc->is_frozen = false;
4790                 binder_inner_proc_unlock(target_proc);
4791         }
4792
4793         return ret;
4794 }
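/*
 * Illustrative userspace sketch (assumes `target_pid` names a binder
 * process and `fd` is an open binder device descriptor): freeze with a
 * 100ms drain window, retrying while transactions are still pending:
 *
 *	struct binder_freeze_info info = {
 *		.pid = target_pid,
 *		.enable = 1,
 *		.timeout_ms = 100,
 *	};
 *
 *	while (ioctl(fd, BINDER_FREEZE, &info) < 0 && errno == EAGAIN)
 *		;
 */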
4795
4796 static int binder_ioctl_get_freezer_info(
4797                                 struct binder_frozen_status_info *info)
4798 {
4799         struct binder_proc *target_proc;
4800         bool found = false;
4801         __u32 txns_pending;
4802
4803         info->sync_recv = 0;
4804         info->async_recv = 0;
4805
4806         mutex_lock(&binder_procs_lock);
4807         hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4808                 if (target_proc->pid == info->pid) {
4809                         found = true;
4810                         binder_inner_proc_lock(target_proc);
4811                         txns_pending = binder_txns_pending_ilocked(target_proc);
4812                         info->sync_recv |= target_proc->sync_recv |
4813                                         (txns_pending << 1);
4814                         info->async_recv |= target_proc->async_recv;
4815                         binder_inner_proc_unlock(target_proc);
4816                 }
4817         }
4818         mutex_unlock(&binder_procs_lock);
4819
4820         if (!found)
4821                 return -EINVAL;
4822
4823         return 0;
4824 }
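/*
 * Note on the encoding above: bit 0 of info->sync_recv reports that a
 * synchronous transaction was received while frozen, and bit 1
 * (txns_pending << 1) reports that transactions were still pending at
 * query time, so userspace must mask the field rather than treat it as
 * a plain boolean. async_recv remains a simple flag.
 */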
4825
4826 static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
4827 {
4828         int ret;
4829         struct binder_proc *proc = filp->private_data;
4830         struct binder_thread *thread;
4831         unsigned int size = _IOC_SIZE(cmd);
4832         void __user *ubuf = (void __user *)arg;
4833
4834         /*pr_info("binder_ioctl: %d:%d %x %lx\n",
4835                         proc->pid, current->pid, cmd, arg);*/
4836
4837         binder_selftest_alloc(&proc->alloc);
4838
4839         trace_binder_ioctl(cmd, arg);
4840
4841         ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
4842         if (ret)
4843                 goto err_unlocked;
4844
4845         thread = binder_get_thread(proc);
4846         if (thread == NULL) {
4847                 ret = -ENOMEM;
4848                 goto err;
4849         }
4850
4851         switch (cmd) {
4852         case BINDER_WRITE_READ:
4853                 ret = binder_ioctl_write_read(filp, cmd, arg, thread);
4854                 if (ret)
4855                         goto err;
4856                 break;
4857         case BINDER_SET_MAX_THREADS: {
4858                 int max_threads;
4859
4860                 if (copy_from_user(&max_threads, ubuf,
4861                                    sizeof(max_threads))) {
4862                         ret = -EINVAL;
4863                         goto err;
4864                 }
4865                 binder_inner_proc_lock(proc);
4866                 proc->max_threads = max_threads;
4867                 binder_inner_proc_unlock(proc);
4868                 break;
4869         }
4870         case BINDER_SET_CONTEXT_MGR_EXT: {
4871                 struct flat_binder_object fbo;
4872
4873                 if (copy_from_user(&fbo, ubuf, sizeof(fbo))) {
4874                         ret = -EINVAL;
4875                         goto err;
4876                 }
4877                 ret = binder_ioctl_set_ctx_mgr(filp, &fbo);
4878                 if (ret)
4879                         goto err;
4880                 break;
4881         }
4882         case BINDER_SET_CONTEXT_MGR:
4883                 ret = binder_ioctl_set_ctx_mgr(filp, NULL);
4884                 if (ret)
4885                         goto err;
4886                 break;
4887         case BINDER_THREAD_EXIT:
4888                 binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n",
4889                              proc->pid, thread->pid);
4890                 binder_thread_release(proc, thread);
4891                 thread = NULL;
4892                 break;
4893         case BINDER_VERSION: {
4894                 struct binder_version __user *ver = ubuf;
4895
4896                 if (size != sizeof(struct binder_version)) {
4897                         ret = -EINVAL;
4898                         goto err;
4899                 }
4900                 if (put_user(BINDER_CURRENT_PROTOCOL_VERSION,
4901                              &ver->protocol_version)) {
4902                         ret = -EINVAL;
4903                         goto err;
4904                 }
4905                 break;
4906         }
4907         case BINDER_GET_NODE_INFO_FOR_REF: {
4908                 struct binder_node_info_for_ref info;
4909
4910                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4911                         ret = -EFAULT;
4912                         goto err;
4913                 }
4914
4915                 ret = binder_ioctl_get_node_info_for_ref(proc, &info);
4916                 if (ret < 0)
4917                         goto err;
4918
4919                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4920                         ret = -EFAULT;
4921                         goto err;
4922                 }
4923
4924                 break;
4925         }
4926         case BINDER_GET_NODE_DEBUG_INFO: {
4927                 struct binder_node_debug_info info;
4928
4929                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4930                         ret = -EFAULT;
4931                         goto err;
4932                 }
4933
4934                 ret = binder_ioctl_get_node_debug_info(proc, &info);
4935                 if (ret < 0)
4936                         goto err;
4937
4938                 if (copy_to_user(ubuf, &info, sizeof(info))) {
4939                         ret = -EFAULT;
4940                         goto err;
4941                 }
4942                 break;
4943         }
4944         case BINDER_FREEZE: {
4945                 struct binder_freeze_info info;
4946                 struct binder_proc **target_procs = NULL, *target_proc;
4947                 int target_procs_count = 0, i = 0;
4948
4949                 ret = 0;
4950
4951                 if (copy_from_user(&info, ubuf, sizeof(info))) {
4952                         ret = -EFAULT;
4953                         goto err;
4954                 }
4955
4956                 mutex_lock(&binder_procs_lock);
4957                 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4958                         if (target_proc->pid == info.pid)
4959                                 target_procs_count++;
4960                 }
4961
4962                 if (target_procs_count == 0) {
4963                         mutex_unlock(&binder_procs_lock);
4964                         ret = -EINVAL;
4965                         goto err;
4966                 }
4967
4968                 target_procs = kcalloc(target_procs_count,
4969                                        sizeof(struct binder_proc *),
4970                                        GFP_KERNEL);
4971
4972                 if (!target_procs) {
4973                         mutex_unlock(&binder_procs_lock);
4974                         ret = -ENOMEM;
4975                         goto err;
4976                 }
4977
4978                 hlist_for_each_entry(target_proc, &binder_procs, proc_node) {
4979                         if (target_proc->pid != info.pid)
4980                                 continue;
4981
4982                         binder_inner_proc_lock(target_proc);
4983                         target_proc->tmp_ref++;
4984                         binder_inner_proc_unlock(target_proc);
4985
4986                         target_procs[i++] = target_proc;
4987                 }
4988                 mutex_unlock(&binder_procs_lock);
4989
4990                 for (i = 0; i < target_procs_count; i++) {
4991                         if (ret >= 0)
4992                                 ret = binder_ioctl_freeze(&info,
4993                                                           target_procs[i]);
4994
4995                         binder_proc_dec_tmpref(target_procs[i]);
4996                 }
4997
4998                 kfree(target_procs);
4999
5000                 if (ret < 0)
5001                         goto err;
5002                 break;
5003         }
5004         case BINDER_GET_FROZEN_INFO: {
5005                 struct binder_frozen_status_info info;
5006
5007                 if (copy_from_user(&info, ubuf, sizeof(info))) {
5008                         ret = -EFAULT;
5009                         goto err;
5010                 }
5011
5012                 ret = binder_ioctl_get_freezer_info(&info);
5013                 if (ret < 0)
5014                         goto err;
5015
5016                 if (copy_to_user(ubuf, &info, sizeof(info))) {
5017                         ret = -EFAULT;
5018                         goto err;
5019                 }
5020                 break;
5021         }
5022         case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: {
5023                 uint32_t enable;
5024
5025                 if (copy_from_user(&enable, ubuf, sizeof(enable))) {
5026                         ret = -EFAULT;
5027                         goto err;
5028                 }
5029                 binder_inner_proc_lock(proc);
5030                 proc->oneway_spam_detection_enabled = (bool)enable;
5031                 binder_inner_proc_unlock(proc);
5032                 break;
5033         }
5034         default:
5035                 ret = -EINVAL;
5036                 goto err;
5037         }
5038         ret = 0;
5039 err:
5040         if (thread)
5041                 thread->looper_need_return = false;
5042         wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
5043         if (ret && ret != -EINTR)
5044                 pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret);
5045 err_unlocked:
5046         trace_binder_ioctl_done(ret);
5047         return ret;
5048 }
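
/*
 * Example (illustrative only): the customary first ioctl from a new
 * client is the BINDER_VERSION handshake handled above. A minimal
 * userspace sketch, assuming an already-open fd:
 *
 *	struct binder_version vers;
 *
 *	if (ioctl(fd, BINDER_VERSION, &vers) < 0 ||
 *	    vers.protocol_version != BINDER_CURRENT_PROTOCOL_VERSION)
 *		return -1;	// incompatible driver
 */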
5049
5050 static void binder_vma_open(struct vm_area_struct *vma)
5051 {
5052         struct binder_proc *proc = vma->vm_private_data;
5053
5054         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5055                      "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5056                      proc->pid, vma->vm_start, vma->vm_end,
5057                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5058                      (unsigned long)pgprot_val(vma->vm_page_prot));
5059 }
5060
5061 static void binder_vma_close(struct vm_area_struct *vma)
5062 {
5063         struct binder_proc *proc = vma->vm_private_data;
5064
5065         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5066                      "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n",
5067                      proc->pid, vma->vm_start, vma->vm_end,
5068                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5069                      (unsigned long)pgprot_val(vma->vm_page_prot));
5070         binder_alloc_vma_close(&proc->alloc);
5071 }
5072
5073 static vm_fault_t binder_vm_fault(struct vm_fault *vmf)
5074 {
5075         return VM_FAULT_SIGBUS;
5076 }
5077
5078 static const struct vm_operations_struct binder_vm_ops = {
5079         .open = binder_vma_open,
5080         .close = binder_vma_close,
5081         .fault = binder_vm_fault,
5082 };
5083
5084 static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
5085 {
5086         struct binder_proc *proc = filp->private_data;
5087
5088         if (proc->tsk != current->group_leader)
5089                 return -EINVAL;
5090
5091         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5092                      "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n",
5093                      __func__, proc->pid, vma->vm_start, vma->vm_end,
5094                      (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
5095                      (unsigned long)pgprot_val(vma->vm_page_prot));
5096
5097         if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) {
5098                 pr_err("%s: %d %lx-%lx %s failed %d\n", __func__,
5099                        proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM);
5100                 return -EPERM;
5101         }
5102         vma->vm_flags |= VM_DONTCOPY | VM_MIXEDMAP;
5103         vma->vm_flags &= ~VM_MAYWRITE;
5104
5105         vma->vm_ops = &binder_vm_ops;
5106         vma->vm_private_data = proc;
5107
5108         return binder_alloc_mmap_handler(&proc->alloc, vma);
5109 }
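
/*
 * Example (illustrative only): userspace maps the transaction buffer
 * read-only; writable mappings are rejected above via
 * FORBIDDEN_MMAP_FLAGS, and clearing VM_MAYWRITE prevents a later
 * mprotect(PROT_WRITE). A libbinder-style mapping (the size here is
 * an assumption, not mandated by this driver):
 *
 *	void *map = mmap(NULL, 1024 * 1024, PROT_READ,
 *			 MAP_PRIVATE | MAP_NORESERVE, fd, 0);
 */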
5110
5111 static int binder_open(struct inode *nodp, struct file *filp)
5112 {
5113         struct binder_proc *proc, *itr;
5114         struct binder_device *binder_dev;
5115         struct binderfs_info *info;
5116         struct dentry *binder_binderfs_dir_entry_proc = NULL;
5117         bool existing_pid = false;
5118
5119         binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__,
5120                      current->group_leader->pid, current->pid);
5121
5122         proc = kzalloc(sizeof(*proc), GFP_KERNEL);
5123         if (proc == NULL)
5124                 return -ENOMEM;
5125         spin_lock_init(&proc->inner_lock);
5126         spin_lock_init(&proc->outer_lock);
5127         get_task_struct(current->group_leader);
5128         proc->tsk = current->group_leader;
5129         proc->cred = get_cred(filp->f_cred);
5130         INIT_LIST_HEAD(&proc->todo);
5131         init_waitqueue_head(&proc->freeze_wait);
5132         proc->default_priority = task_nice(current);
5133         /* binderfs stashes devices in i_private */
5134         if (is_binderfs_device(nodp)) {
5135                 binder_dev = nodp->i_private;
5136                 info = nodp->i_sb->s_fs_info;
5137                 binder_binderfs_dir_entry_proc = info->proc_log_dir;
5138         } else {
5139                 binder_dev = container_of(filp->private_data,
5140                                           struct binder_device, miscdev);
5141         }
5142         refcount_inc(&binder_dev->ref);
5143         proc->context = &binder_dev->context;
5144         binder_alloc_init(&proc->alloc);
5145
5146         binder_stats_created(BINDER_STAT_PROC);
5147         proc->pid = current->group_leader->pid;
5148         INIT_LIST_HEAD(&proc->delivered_death);
5149         INIT_LIST_HEAD(&proc->waiting_threads);
5150         filp->private_data = proc;
5151
5152         mutex_lock(&binder_procs_lock);
5153         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5154                 if (itr->pid == proc->pid) {
5155                         existing_pid = true;
5156                         break;
5157                 }
5158         }
5159         hlist_add_head(&proc->proc_node, &binder_procs);
5160         mutex_unlock(&binder_procs_lock);
5161
5162         if (binder_debugfs_dir_entry_proc && !existing_pid) {
5163                 char strbuf[11];
5164
5165                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5166                 /*
5167                  * proc debug entries are shared between contexts.
5168                  * Only create for the first PID to avoid debugfs log spamming.
5169                  * The printing code will print all contexts for a given PID
5170                  * anyway, so this is not a problem.
5171                  */
5172                 proc->debugfs_entry = debugfs_create_file(strbuf, 0444,
5173                         binder_debugfs_dir_entry_proc,
5174                         (void *)(unsigned long)proc->pid,
5175                         &proc_fops);
5176         }
5177
5178         if (binder_binderfs_dir_entry_proc && !existing_pid) {
5179                 char strbuf[11];
5180                 struct dentry *binderfs_entry;
5181
5182                 snprintf(strbuf, sizeof(strbuf), "%u", proc->pid);
5183                 /*
5184                  * Similar to debugfs, the process specific log file is shared
5185                  * As with debugfs, the process-specific log file is shared
5186                  * between contexts. Only create it for the first PID.
5187                  * This is OK since, as with debugfs, the log file will
5188                  * contain information on all contexts of a given PID.
5189                 binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc,
5190                         strbuf, &proc_fops, (void *)(unsigned long)proc->pid);
5191                 if (!IS_ERR(binderfs_entry)) {
5192                         proc->binderfs_entry = binderfs_entry;
5193                 } else {
5194                         int error;
5195
5196                         error = PTR_ERR(binderfs_entry);
5197                         pr_warn("Unable to create file %s in binderfs (error %d)\n",
5198                                 strbuf, error);
5199                 }
5200         }
5201
5202         return 0;
5203 }
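
/*
 * Example (illustrative only): one binder_proc is created per opening
 * process (keyed on the group leader), so a client needs nothing more
 * than:
 *
 *	int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *
 * All threads of the process then share the same fd and proc.
 */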
5204
5205 static int binder_flush(struct file *filp, fl_owner_t id)
5206 {
5207         struct binder_proc *proc = filp->private_data;
5208
5209         binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
5210
5211         return 0;
5212 }
5213
5214 static void binder_deferred_flush(struct binder_proc *proc)
5215 {
5216         struct rb_node *n;
5217         int wake_count = 0;
5218
5219         binder_inner_proc_lock(proc);
5220         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) {
5221                 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node);
5222
5223                 thread->looper_need_return = true;
5224                 if (thread->looper & BINDER_LOOPER_STATE_WAITING) {
5225                         wake_up_interruptible(&thread->wait);
5226                         wake_count++;
5227                 }
5228         }
5229         binder_inner_proc_unlock(proc);
5230
5231         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5232                      "binder_flush: %d woke %d threads\n", proc->pid,
5233                      wake_count);
5234 }
5235
5236 static int binder_release(struct inode *nodp, struct file *filp)
5237 {
5238         struct binder_proc *proc = filp->private_data;
5239
5240         debugfs_remove(proc->debugfs_entry);
5241
5242         if (proc->binderfs_entry) {
5243                 binderfs_remove_file(proc->binderfs_entry);
5244                 proc->binderfs_entry = NULL;
5245         }
5246
5247         binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
5248
5249         return 0;
5250 }
5251
5252 static int binder_node_release(struct binder_node *node, int refs)
5253 {
5254         struct binder_ref *ref;
5255         int death = 0;
5256         struct binder_proc *proc = node->proc;
5257
5258         binder_release_work(proc, &node->async_todo);
5259
5260         binder_node_lock(node);
5261         binder_inner_proc_lock(proc);
5262         binder_dequeue_work_ilocked(&node->work);
5263         /*
5264          * The caller must have taken a temporary ref on the node.
5265          */
5266         BUG_ON(!node->tmp_refs);
5267         if (hlist_empty(&node->refs) && node->tmp_refs == 1) {
5268                 binder_inner_proc_unlock(proc);
5269                 binder_node_unlock(node);
5270                 binder_free_node(node);
5271
5272                 return refs;
5273         }
5274
5275         node->proc = NULL;
5276         node->local_strong_refs = 0;
5277         node->local_weak_refs = 0;
5278         binder_inner_proc_unlock(proc);
5279
5280         spin_lock(&binder_dead_nodes_lock);
5281         hlist_add_head(&node->dead_node, &binder_dead_nodes);
5282         spin_unlock(&binder_dead_nodes_lock);
5283
5284         hlist_for_each_entry(ref, &node->refs, node_entry) {
5285                 refs++;
5286                 /*
5287                  * Need the node lock to synchronize
5288                  * with new notification requests and the
5289                  * inner lock to synchronize with queued
5290                  * death notifications.
5291                  */
5292                 binder_inner_proc_lock(ref->proc);
5293                 if (!ref->death) {
5294                         binder_inner_proc_unlock(ref->proc);
5295                         continue;
5296                 }
5297
5298                 death++;
5299
5300                 BUG_ON(!list_empty(&ref->death->work.entry));
5301                 ref->death->work.type = BINDER_WORK_DEAD_BINDER;
5302                 binder_enqueue_work_ilocked(&ref->death->work,
5303                                             &ref->proc->todo);
5304                 binder_wakeup_proc_ilocked(ref->proc);
5305                 binder_inner_proc_unlock(ref->proc);
5306         }
5307
5308         binder_debug(BINDER_DEBUG_DEAD_BINDER,
5309                      "node %d now dead, refs %d, death %d\n",
5310                      node->debug_id, refs, death);
5311         binder_node_unlock(node);
5312         binder_put_node(node);
5313
5314         return refs;
5315 }
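
/*
 * Illustrative note: for each ref that registered a death notification
 * (BC_REQUEST_DEATH_NOTIFICATION), the loop above queues
 * BINDER_WORK_DEAD_BINDER on the ref's process, which that process
 * later reads back as a BR_DEAD_BINDER command.
 */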
5316
5317 static void binder_deferred_release(struct binder_proc *proc)
5318 {
5319         struct binder_context *context = proc->context;
5320         struct rb_node *n;
5321         int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
5322
5323         mutex_lock(&binder_procs_lock);
5324         hlist_del(&proc->proc_node);
5325         mutex_unlock(&binder_procs_lock);
5326
5327         mutex_lock(&context->context_mgr_node_lock);
5328         if (context->binder_context_mgr_node &&
5329             context->binder_context_mgr_node->proc == proc) {
5330                 binder_debug(BINDER_DEBUG_DEAD_BINDER,
5331                              "%s: %d context_mgr_node gone\n",
5332                              __func__, proc->pid);
5333                 context->binder_context_mgr_node = NULL;
5334         }
5335         mutex_unlock(&context->context_mgr_node_lock);
5336         binder_inner_proc_lock(proc);
5337         /*
5338          * Make sure proc stays alive after we
5339          * remove all the threads.
5340          */
5341         proc->tmp_ref++;
5342
5343         proc->is_dead = true;
5344         proc->is_frozen = false;
5345         proc->sync_recv = false;
5346         proc->async_recv = false;
5347         threads = 0;
5348         active_transactions = 0;
5349         while ((n = rb_first(&proc->threads))) {
5350                 struct binder_thread *thread;
5351
5352                 thread = rb_entry(n, struct binder_thread, rb_node);
5353                 binder_inner_proc_unlock(proc);
5354                 threads++;
5355                 active_transactions += binder_thread_release(proc, thread);
5356                 binder_inner_proc_lock(proc);
5357         }
5358
5359         nodes = 0;
5360         incoming_refs = 0;
5361         while ((n = rb_first(&proc->nodes))) {
5362                 struct binder_node *node;
5363
5364                 node = rb_entry(n, struct binder_node, rb_node);
5365                 nodes++;
5366                 /*
5367                  * take a temporary ref on the node before
5368                  * calling binder_node_release() which will either
5369          * calling binder_node_release(), which will either
5370          * kfree() the node or call binder_put_node().
5371                 binder_inc_node_tmpref_ilocked(node);
5372                 rb_erase(&node->rb_node, &proc->nodes);
5373                 binder_inner_proc_unlock(proc);
5374                 incoming_refs = binder_node_release(node, incoming_refs);
5375                 binder_inner_proc_lock(proc);
5376         }
5377         binder_inner_proc_unlock(proc);
5378
5379         outgoing_refs = 0;
5380         binder_proc_lock(proc);
5381         while ((n = rb_first(&proc->refs_by_desc))) {
5382                 struct binder_ref *ref;
5383
5384                 ref = rb_entry(n, struct binder_ref, rb_node_desc);
5385                 outgoing_refs++;
5386                 binder_cleanup_ref_olocked(ref);
5387                 binder_proc_unlock(proc);
5388                 binder_free_ref(ref);
5389                 binder_proc_lock(proc);
5390         }
5391         binder_proc_unlock(proc);
5392
5393         binder_release_work(proc, &proc->todo);
5394         binder_release_work(proc, &proc->delivered_death);
5395
5396         binder_debug(BINDER_DEBUG_OPEN_CLOSE,
5397                      "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n",
5398                      __func__, proc->pid, threads, nodes, incoming_refs,
5399                      outgoing_refs, active_transactions);
5400
5401         binder_proc_dec_tmpref(proc);
5402 }
5403
5404 static void binder_deferred_func(struct work_struct *work)
5405 {
5406         struct binder_proc *proc;
5407
5408         int defer;
5409
5410         do {
5411                 mutex_lock(&binder_deferred_lock);
5412                 if (!hlist_empty(&binder_deferred_list)) {
5413                         proc = hlist_entry(binder_deferred_list.first,
5414                                         struct binder_proc, deferred_work_node);
5415                         hlist_del_init(&proc->deferred_work_node);
5416                         defer = proc->deferred_work;
5417                         proc->deferred_work = 0;
5418                 } else {
5419                         proc = NULL;
5420                         defer = 0;
5421                 }
5422                 mutex_unlock(&binder_deferred_lock);
5423
5424                 if (defer & BINDER_DEFERRED_FLUSH)
5425                         binder_deferred_flush(proc);
5426
5427                 if (defer & BINDER_DEFERRED_RELEASE)
5428                         binder_deferred_release(proc); /* frees proc */
5429         } while (proc);
5430 }
5431 static DECLARE_WORK(binder_deferred_work, binder_deferred_func);
5432
5433 static void
5434 binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
5435 {
5436         mutex_lock(&binder_deferred_lock);
5437         proc->deferred_work |= defer;
5438         if (hlist_unhashed(&proc->deferred_work_node)) {
5439                 hlist_add_head(&proc->deferred_work_node,
5440                                 &binder_deferred_list);
5441                 schedule_work(&binder_deferred_work);
5442         }
5443         mutex_unlock(&binder_deferred_lock);
5444 }
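
/*
 * Illustrative note: deferred work is coalesced per process. Both of
 *
 *	binder_defer_work(proc, BINDER_DEFERRED_FLUSH);
 *	binder_defer_work(proc, BINDER_DEFERRED_RELEASE);
 *
 * may be queued before the worker runs, in which case a single
 * binder_deferred_func() pass sees both bits set in "defer" and
 * performs the flush before the release.
 */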
5445
5446 static void print_binder_transaction_ilocked(struct seq_file *m,
5447                                              struct binder_proc *proc,
5448                                              const char *prefix,
5449                                              struct binder_transaction *t)
5450 {
5451         struct binder_proc *to_proc;
5452         struct binder_buffer *buffer = t->buffer;
5453
5454         spin_lock(&t->lock);
5455         to_proc = t->to_proc;
5456         seq_printf(m,
5457                    "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d",
5458                    prefix, t->debug_id, t,
5459                    t->from ? t->from->proc->pid : 0,
5460                    t->from ? t->from->pid : 0,
5461                    to_proc ? to_proc->pid : 0,
5462                    t->to_thread ? t->to_thread->pid : 0,
5463                    t->code, t->flags, t->priority, t->need_reply);
5464         spin_unlock(&t->lock);
5465
5466         if (proc != to_proc) {
5467                 /*
5468                  * Can only safely deref buffer if we are holding the
5469                  * correct proc inner lock for this node.
5470                  */
5471                 seq_puts(m, "\n");
5472                 return;
5473         }
5474
5475         if (buffer == NULL) {
5476                 seq_puts(m, " buffer free\n");
5477                 return;
5478         }
5479         if (buffer->target_node)
5480                 seq_printf(m, " node %d", buffer->target_node->debug_id);
5481         seq_printf(m, " size %zd:%zd data %pK\n",
5482                    buffer->data_size, buffer->offsets_size,
5483                    buffer->user_data);
5484 }
5485
5486 static void print_binder_work_ilocked(struct seq_file *m,
5487                                      struct binder_proc *proc,
5488                                      const char *prefix,
5489                                      const char *transaction_prefix,
5490                                      struct binder_work *w)
5491 {
5492         struct binder_node *node;
5493         struct binder_transaction *t;
5494
5495         switch (w->type) {
5496         case BINDER_WORK_TRANSACTION:
5497                 t = container_of(w, struct binder_transaction, work);
5498                 print_binder_transaction_ilocked(
5499                                 m, proc, transaction_prefix, t);
5500                 break;
5501         case BINDER_WORK_RETURN_ERROR: {
5502                 struct binder_error *e = container_of(
5503                                 w, struct binder_error, work);
5504
5505                 seq_printf(m, "%stransaction error: %u\n",
5506                            prefix, e->cmd);
5507         } break;
5508         case BINDER_WORK_TRANSACTION_COMPLETE:
5509                 seq_printf(m, "%stransaction complete\n", prefix);
5510                 break;
5511         case BINDER_WORK_NODE:
5512                 node = container_of(w, struct binder_node, work);
5513                 seq_printf(m, "%snode work %d: u%016llx c%016llx\n",
5514                            prefix, node->debug_id,
5515                            (u64)node->ptr, (u64)node->cookie);
5516                 break;
5517         case BINDER_WORK_DEAD_BINDER:
5518                 seq_printf(m, "%shas dead binder\n", prefix);
5519                 break;
5520         case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
5521                 seq_printf(m, "%shas cleared dead binder\n", prefix);
5522                 break;
5523         case BINDER_WORK_CLEAR_DEATH_NOTIFICATION:
5524                 seq_printf(m, "%shas cleared death notification\n", prefix);
5525                 break;
5526         default:
5527                 seq_printf(m, "%sunknown work: type %d\n", prefix, w->type);
5528                 break;
5529         }
5530 }
5531
5532 static void print_binder_thread_ilocked(struct seq_file *m,
5533                                         struct binder_thread *thread,
5534                                         int print_always)
5535 {
5536         struct binder_transaction *t;
5537         struct binder_work *w;
5538         size_t start_pos = m->count;
5539         size_t header_pos;
5540
5541         seq_printf(m, "  thread %d: l %02x need_return %d tr %d\n",
5542                         thread->pid, thread->looper,
5543                         thread->looper_need_return,
5544                         atomic_read(&thread->tmp_ref));
5545         header_pos = m->count;
5546         t = thread->transaction_stack;
5547         while (t) {
5548                 if (t->from == thread) {
5549                         print_binder_transaction_ilocked(m, thread->proc,
5550                                         "    outgoing transaction", t);
5551                         t = t->from_parent;
5552                 } else if (t->to_thread == thread) {
5553                         print_binder_transaction_ilocked(m, thread->proc,
5554                                                  "    incoming transaction", t);
5555                         t = t->to_parent;
5556                 } else {
5557                         print_binder_transaction_ilocked(m, thread->proc,
5558                                         "    bad transaction", t);
5559                         t = NULL;
5560                 }
5561         }
5562         list_for_each_entry(w, &thread->todo, entry) {
5563                 print_binder_work_ilocked(m, thread->proc, "    ",
5564                                           "    pending transaction", w);
5565         }
5566         if (!print_always && m->count == header_pos)
5567                 m->count = start_pos;
5568 }
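
/*
 * Illustrative note: the start_pos/header_pos dance above is a small
 * seq_file trick: if nothing was emitted after the thread header,
 * m->count is rewound to start_pos so idle threads are omitted from
 * non-verbose (print_always == 0) dumps.
 */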
5569
5570 static void print_binder_node_nilocked(struct seq_file *m,
5571                                        struct binder_node *node)
5572 {
5573         struct binder_ref *ref;
5574         struct binder_work *w;
5575         int count;
5576
5577         count = 0;
5578         hlist_for_each_entry(ref, &node->refs, node_entry)
5579                 count++;
5580
5581         seq_printf(m, "  node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d",
5582                    node->debug_id, (u64)node->ptr, (u64)node->cookie,
5583                    node->has_strong_ref, node->has_weak_ref,
5584                    node->local_strong_refs, node->local_weak_refs,
5585                    node->internal_strong_refs, count, node->tmp_refs);
5586         if (count) {
5587                 seq_puts(m, " proc");
5588                 hlist_for_each_entry(ref, &node->refs, node_entry)
5589                         seq_printf(m, " %d", ref->proc->pid);
5590         }
5591         seq_puts(m, "\n");
5592         if (node->proc) {
5593                 list_for_each_entry(w, &node->async_todo, entry)
5594                         print_binder_work_ilocked(m, node->proc, "    ",
5595                                           "    pending async transaction", w);
5596         }
5597 }
5598
5599 static void print_binder_ref_olocked(struct seq_file *m,
5600                                      struct binder_ref *ref)
5601 {
5602         binder_node_lock(ref->node);
5603         seq_printf(m, "  ref %d: desc %d %snode %d s %d w %d d %pK\n",
5604                    ref->data.debug_id, ref->data.desc,
5605                    ref->node->proc ? "" : "dead ",
5606                    ref->node->debug_id, ref->data.strong,
5607                    ref->data.weak, ref->death);
5608         binder_node_unlock(ref->node);
5609 }
5610
5611 static void print_binder_proc(struct seq_file *m,
5612                               struct binder_proc *proc, int print_all)
5613 {
5614         struct binder_work *w;
5615         struct rb_node *n;
5616         size_t start_pos = m->count;
5617         size_t header_pos;
5618         struct binder_node *last_node = NULL;
5619
5620         seq_printf(m, "proc %d\n", proc->pid);
5621         seq_printf(m, "context %s\n", proc->context->name);
5622         header_pos = m->count;
5623
5624         binder_inner_proc_lock(proc);
5625         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5626                 print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread,
5627                                                 rb_node), print_all);
5628
5629         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) {
5630                 struct binder_node *node = rb_entry(n, struct binder_node,
5631                                                     rb_node);
5632                 if (!print_all && !node->has_async_transaction)
5633                         continue;
5634
5635                 /*
5636                  * take a temporary reference on the node so it
5637                  * survives and isn't removed from the tree
5638                  * while we print it.
5639                  */
5640                 binder_inc_node_tmpref_ilocked(node);
5641                 /* Need to drop inner lock to take node lock */
5642                 binder_inner_proc_unlock(proc);
5643                 if (last_node)
5644                         binder_put_node(last_node);
5645                 binder_node_inner_lock(node);
5646                 print_binder_node_nilocked(m, node);
5647                 binder_node_inner_unlock(node);
5648                 last_node = node;
5649                 binder_inner_proc_lock(proc);
5650         }
5651         binder_inner_proc_unlock(proc);
5652         if (last_node)
5653                 binder_put_node(last_node);
5654
5655         if (print_all) {
5656                 binder_proc_lock(proc);
5657                 for (n = rb_first(&proc->refs_by_desc);
5658                      n != NULL;
5659                      n = rb_next(n))
5660                         print_binder_ref_olocked(m, rb_entry(n,
5661                                                             struct binder_ref,
5662                                                             rb_node_desc));
5663                 binder_proc_unlock(proc);
5664         }
5665         binder_alloc_print_allocated(m, &proc->alloc);
5666         binder_inner_proc_lock(proc);
5667         list_for_each_entry(w, &proc->todo, entry)
5668                 print_binder_work_ilocked(m, proc, "  ",
5669                                           "  pending transaction", w);
5670         list_for_each_entry(w, &proc->delivered_death, entry) {
5671                 seq_puts(m, "  has delivered dead binder\n");
5672                 break;
5673         }
5674         binder_inner_proc_unlock(proc);
5675         if (!print_all && m->count == header_pos)
5676                 m->count = start_pos;
5677 }
5678
5679 static const char * const binder_return_strings[] = {
5680         "BR_ERROR",
5681         "BR_OK",
5682         "BR_TRANSACTION",
5683         "BR_REPLY",
5684         "BR_ACQUIRE_RESULT",
5685         "BR_DEAD_REPLY",
5686         "BR_TRANSACTION_COMPLETE",
5687         "BR_INCREFS",
5688         "BR_ACQUIRE",
5689         "BR_RELEASE",
5690         "BR_DECREFS",
5691         "BR_ATTEMPT_ACQUIRE",
5692         "BR_NOOP",
5693         "BR_SPAWN_LOOPER",
5694         "BR_FINISHED",
5695         "BR_DEAD_BINDER",
5696         "BR_CLEAR_DEATH_NOTIFICATION_DONE",
5697         "BR_FAILED_REPLY",
5698         "BR_FROZEN_REPLY",
5699         "BR_ONEWAY_SPAM_SUSPECT",
5700 };
5701
5702 static const char * const binder_command_strings[] = {
5703         "BC_TRANSACTION",
5704         "BC_REPLY",
5705         "BC_ACQUIRE_RESULT",
5706         "BC_FREE_BUFFER",
5707         "BC_INCREFS",
5708         "BC_ACQUIRE",
5709         "BC_RELEASE",
5710         "BC_DECREFS",
5711         "BC_INCREFS_DONE",
5712         "BC_ACQUIRE_DONE",
5713         "BC_ATTEMPT_ACQUIRE",
5714         "BC_REGISTER_LOOPER",
5715         "BC_ENTER_LOOPER",
5716         "BC_EXIT_LOOPER",
5717         "BC_REQUEST_DEATH_NOTIFICATION",
5718         "BC_CLEAR_DEATH_NOTIFICATION",
5719         "BC_DEAD_BINDER_DONE",
5720         "BC_TRANSACTION_SG",
5721         "BC_REPLY_SG",
5722 };
5723
5724 static const char * const binder_objstat_strings[] = {
5725         "proc",
5726         "thread",
5727         "node",
5728         "ref",
5729         "death",
5730         "transaction",
5731         "transaction_complete"
5732 };
5733
5734 static void print_binder_stats(struct seq_file *m, const char *prefix,
5735                                struct binder_stats *stats)
5736 {
5737         int i;
5738
5739         BUILD_BUG_ON(ARRAY_SIZE(stats->bc) !=
5740                      ARRAY_SIZE(binder_command_strings));
5741         for (i = 0; i < ARRAY_SIZE(stats->bc); i++) {
5742                 int temp = atomic_read(&stats->bc[i]);
5743
5744                 if (temp)
5745                         seq_printf(m, "%s%s: %d\n", prefix,
5746                                    binder_command_strings[i], temp);
5747         }
5748
5749         BUILD_BUG_ON(ARRAY_SIZE(stats->br) !=
5750                      ARRAY_SIZE(binder_return_strings));
5751         for (i = 0; i < ARRAY_SIZE(stats->br); i++) {
5752                 int temp = atomic_read(&stats->br[i]);
5753
5754                 if (temp)
5755                         seq_printf(m, "%s%s: %d\n", prefix,
5756                                    binder_return_strings[i], temp);
5757         }
5758
5759         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5760                      ARRAY_SIZE(binder_objstat_strings));
5761         BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) !=
5762                      ARRAY_SIZE(stats->obj_deleted));
5763         for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) {
5764                 int created = atomic_read(&stats->obj_created[i]);
5765                 int deleted = atomic_read(&stats->obj_deleted[i]);
5766
5767                 if (created || deleted)
5768                         seq_printf(m, "%s%s: active %d total %d\n",
5769                                 prefix,
5770                                 binder_objstat_strings[i],
5771                                 created - deleted,
5772                                 created);
5773         }
5774 }
5775
5776 static void print_binder_proc_stats(struct seq_file *m,
5777                                     struct binder_proc *proc)
5778 {
5779         struct binder_work *w;
5780         struct binder_thread *thread;
5781         struct rb_node *n;
5782         int count, strong, weak, ready_threads;
5783         size_t free_async_space =
5784                 binder_alloc_get_free_async_space(&proc->alloc);
5785
5786         seq_printf(m, "proc %d\n", proc->pid);
5787         seq_printf(m, "context %s\n", proc->context->name);
5788         count = 0;
5789         ready_threads = 0;
5790         binder_inner_proc_lock(proc);
5791         for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n))
5792                 count++;
5793
5794         list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node)
5795                 ready_threads++;
5796
5797         seq_printf(m, "  threads: %d\n", count);
5798         seq_printf(m, "  requested threads: %d+%d/%d\n"
5799                         "  ready threads %d\n"
5800                         "  free async space %zd\n", proc->requested_threads,
5801                         proc->requested_threads_started, proc->max_threads,
5802                         ready_threads,
5803                         free_async_space);
5804         count = 0;
5805         for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n))
5806                 count++;
5807         binder_inner_proc_unlock(proc);
5808         seq_printf(m, "  nodes: %d\n", count);
5809         count = 0;
5810         strong = 0;
5811         weak = 0;
5812         binder_proc_lock(proc);
5813         for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) {
5814                 struct binder_ref *ref = rb_entry(n, struct binder_ref,
5815                                                   rb_node_desc);
5816                 count++;
5817                 strong += ref->data.strong;
5818                 weak += ref->data.weak;
5819         }
5820         binder_proc_unlock(proc);
5821         seq_printf(m, "  refs: %d s %d w %d\n", count, strong, weak);
5822
5823         count = binder_alloc_get_allocated_count(&proc->alloc);
5824         seq_printf(m, "  buffers: %d\n", count);
5825
5826         binder_alloc_print_pages(m, &proc->alloc);
5827
5828         count = 0;
5829         binder_inner_proc_lock(proc);
5830         list_for_each_entry(w, &proc->todo, entry) {
5831                 if (w->type == BINDER_WORK_TRANSACTION)
5832                         count++;
5833         }
5834         binder_inner_proc_unlock(proc);
5835         seq_printf(m, "  pending transactions: %d\n", count);
5836
5837         print_binder_stats(m, "  ", &proc->stats);
5838 }
5839
5840 static int state_show(struct seq_file *m, void *unused)
5841 {
5842         struct binder_proc *proc;
5843         struct binder_node *node;
5844         struct binder_node *last_node = NULL;
5845
5846         seq_puts(m, "binder state:\n");
5847
5848         spin_lock(&binder_dead_nodes_lock);
5849         if (!hlist_empty(&binder_dead_nodes))
5850                 seq_puts(m, "dead nodes:\n");
5851         hlist_for_each_entry(node, &binder_dead_nodes, dead_node) {
5852                 /*
5853                  * take a temporary reference on the node so it
5854                  * survives and isn't removed from the list
5855                  * while we print it.
5856                  */
5857                 node->tmp_refs++;
5858                 spin_unlock(&binder_dead_nodes_lock);
5859                 if (last_node)
5860                         binder_put_node(last_node);
5861                 binder_node_lock(node);
5862                 print_binder_node_nilocked(m, node);
5863                 binder_node_unlock(node);
5864                 last_node = node;
5865                 spin_lock(&binder_dead_nodes_lock);
5866         }
5867         spin_unlock(&binder_dead_nodes_lock);
5868         if (last_node)
5869                 binder_put_node(last_node);
5870
5871         mutex_lock(&binder_procs_lock);
5872         hlist_for_each_entry(proc, &binder_procs, proc_node)
5873                 print_binder_proc(m, proc, 1);
5874         mutex_unlock(&binder_procs_lock);
5875
5876         return 0;
5877 }
5878
5879 static int stats_show(struct seq_file *m, void *unused)
5880 {
5881         struct binder_proc *proc;
5882
5883         seq_puts(m, "binder stats:\n");
5884
5885         print_binder_stats(m, "", &binder_stats);
5886
5887         mutex_lock(&binder_procs_lock);
5888         hlist_for_each_entry(proc, &binder_procs, proc_node)
5889                 print_binder_proc_stats(m, proc);
5890         mutex_unlock(&binder_procs_lock);
5891
5892         return 0;
5893 }
5894
5895 static int transactions_show(struct seq_file *m, void *unused)
5896 {
5897         struct binder_proc *proc;
5898
5899         seq_puts(m, "binder transactions:\n");
5900         mutex_lock(&binder_procs_lock);
5901         hlist_for_each_entry(proc, &binder_procs, proc_node)
5902                 print_binder_proc(m, proc, 0);
5903         mutex_unlock(&binder_procs_lock);
5904
5905         return 0;
5906 }
5907
5908 static int proc_show(struct seq_file *m, void *unused)
5909 {
5910         struct binder_proc *itr;
5911         int pid = (unsigned long)m->private;
5912
5913         mutex_lock(&binder_procs_lock);
5914         hlist_for_each_entry(itr, &binder_procs, proc_node) {
5915                 if (itr->pid == pid) {
5916                         seq_puts(m, "binder proc state:\n");
5917                         print_binder_proc(m, itr, 1);
5918                 }
5919         }
5920         mutex_unlock(&binder_procs_lock);
5921
5922         return 0;
5923 }
5924
5925 static void print_binder_transaction_log_entry(struct seq_file *m,
5926                                         struct binder_transaction_log_entry *e)
5927 {
5928         int debug_id = READ_ONCE(e->debug_id_done);
5929         /*
5930          * read barrier to guarantee that debug_id_done is read
5931          * before we print the log values
5932          */
5933         smp_rmb();
5934         seq_printf(m,
5935                    "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d",
5936                    e->debug_id, (e->call_type == 2) ? "reply" :
5937                    ((e->call_type == 1) ? "async" : "call "), e->from_proc,
5938                    e->from_thread, e->to_proc, e->to_thread, e->context_name,
5939                    e->to_node, e->target_handle, e->data_size, e->offsets_size,
5940                    e->return_error, e->return_error_param,
5941                    e->return_error_line);
5942         /*
5943          * read barrier to guarantee the second read of debug_id_done
5944          * happens after we are done printing the fields of the entry
5945          */
5946         smp_rmb();
5947         seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ?
5948                         "\n" : " (incomplete)\n");
5949 }
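
/*
 * Illustrative note: the two smp_rmb() calls above form a
 * seqcount-style consistency check against the log writer, which
 * updates debug_id_done only after filling in the entry. If
 * debug_id_done is still 0 or differs between the two reads, the
 * entry was being overwritten concurrently and is flagged
 * " (incomplete)".
 */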
5950
5951 static int transaction_log_show(struct seq_file *m, void *unused)
5952 {
5953         struct binder_transaction_log *log = m->private;
5954         unsigned int log_cur = atomic_read(&log->cur);
5955         unsigned int count;
5956         unsigned int cur;
5957         int i;
5958
5959         count = log_cur + 1;
5960         cur = count < ARRAY_SIZE(log->entry) && !log->full ?
5961                 0 : count % ARRAY_SIZE(log->entry);
5962         if (count > ARRAY_SIZE(log->entry) || log->full)
5963                 count = ARRAY_SIZE(log->entry);
5964         for (i = 0; i < count; i++) {
5965                 unsigned int index = cur++ % ARRAY_SIZE(log->entry);
5966
5967                 print_binder_transaction_log_entry(m, &log->entry[index]);
5968         }
5969         return 0;
5970 }
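
/*
 * Worked example (assuming a 32-entry log): once the log has wrapped,
 * log->cur == 40 gives count = 41, clamped to 32, and cur = 41 % 32 =
 * 9, so entries are printed oldest-first from index 9 around to
 * index 8 (the most recently written slot).
 */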
5971
5972 const struct file_operations binder_fops = {
5973         .owner = THIS_MODULE,
5974         .poll = binder_poll,
5975         .unlocked_ioctl = binder_ioctl,
5976         .compat_ioctl = compat_ptr_ioctl,
5977         .mmap = binder_mmap,
5978         .open = binder_open,
5979         .flush = binder_flush,
5980         .release = binder_release,
5981 };
5982
5983 DEFINE_SHOW_ATTRIBUTE(state);
5984 DEFINE_SHOW_ATTRIBUTE(stats);
5985 DEFINE_SHOW_ATTRIBUTE(transactions);
5986 DEFINE_SHOW_ATTRIBUTE(transaction_log);
5987
5988 const struct binder_debugfs_entry binder_debugfs_entries[] = {
5989         {
5990                 .name = "state",
5991                 .mode = 0444,
5992                 .fops = &state_fops,
5993                 .data = NULL,
5994         },
5995         {
5996                 .name = "stats",
5997                 .mode = 0444,
5998                 .fops = &stats_fops,
5999                 .data = NULL,
6000         },
6001         {
6002                 .name = "transactions",
6003                 .mode = 0444,
6004                 .fops = &transactions_fops,
6005                 .data = NULL,
6006         },
6007         {
6008                 .name = "transaction_log",
6009                 .mode = 0444,
6010                 .fops = &transaction_log_fops,
6011                 .data = &binder_transaction_log,
6012         },
6013         {
6014                 .name = "failed_transaction_log",
6015                 .mode = 0444,
6016                 .fops = &transaction_log_fops,
6017                 .data = &binder_transaction_log_failed,
6018         },
6019         {} /* terminator */
6020 };
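
/*
 * Example (illustrative only): with CONFIG_DEBUG_FS enabled and
 * debugfs mounted in the usual place, the entries above appear as:
 *
 *	/sys/kernel/debug/binder/state
 *	/sys/kernel/debug/binder/stats
 *	/sys/kernel/debug/binder/transactions
 *	/sys/kernel/debug/binder/transaction_log
 *	/sys/kernel/debug/binder/failed_transaction_log
 *
 * plus per-process files under /sys/kernel/debug/binder/proc/<pid>.
 */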
6021
6022 static int __init init_binder_device(const char *name)
6023 {
6024         int ret;
6025         struct binder_device *binder_device;
6026
6027         binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL);
6028         if (!binder_device)
6029                 return -ENOMEM;
6030
6031         binder_device->miscdev.fops = &binder_fops;
6032         binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
6033         binder_device->miscdev.name = name;
6034
6035         refcount_set(&binder_device->ref, 1);
6036         binder_device->context.binder_context_mgr_uid = INVALID_UID;
6037         binder_device->context.name = name;
6038         mutex_init(&binder_device->context.context_mgr_node_lock);
6039
6040         ret = misc_register(&binder_device->miscdev);
6041         if (ret < 0) {
6042                 kfree(binder_device);
6043                 return ret;
6044         }
6045
6046         hlist_add_head(&binder_device->hlist, &binder_devices);
6047
6048         return ret;
6049 }
6050
6051 static int __init binder_init(void)
6052 {
6053         int ret;
6054         char *device_name, *device_tmp;
6055         struct binder_device *device;
6056         struct hlist_node *tmp;
6057         char *device_names = NULL;
6058
6059         ret = binder_alloc_shrinker_init();
6060         if (ret)
6061                 return ret;
6062
6063         atomic_set(&binder_transaction_log.cur, ~0U);
6064         atomic_set(&binder_transaction_log_failed.cur, ~0U);
6065
6066         binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
6067         if (binder_debugfs_dir_entry_root) {
6068                 const struct binder_debugfs_entry *db_entry;
6069
6070                 binder_for_each_debugfs_entry(db_entry)
6071                         debugfs_create_file(db_entry->name,
6072                                             db_entry->mode,
6073                                             binder_debugfs_dir_entry_root,
6074                                             db_entry->data,
6075                                             db_entry->fops);
6076
6077                 binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
6078                                                  binder_debugfs_dir_entry_root);
6079         }
6080
6081         if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) &&
6082             strcmp(binder_devices_param, "") != 0) {
6083                 /*
6084                  * Copy the module_parameter string, because we don't want to
6085                  * tokenize it in-place.
6086                  */
6087                 device_names = kstrdup(binder_devices_param, GFP_KERNEL);
6088                 if (!device_names) {
6089                         ret = -ENOMEM;
6090                         goto err_alloc_device_names_failed;
6091                 }
6092
6093                 device_tmp = device_names;
6094                 while ((device_name = strsep(&device_tmp, ","))) {
6095                         ret = init_binder_device(device_name);
6096                         if (ret)
6097                                 goto err_init_binder_device_failed;
6098                 }
6099         }
6100
6101         ret = init_binderfs();
6102         if (ret)
6103                 goto err_init_binder_device_failed;
6104
6105         return ret;
6106
6107 err_init_binder_device_failed:
6108         hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
6109                 misc_deregister(&device->miscdev);
6110                 hlist_del(&device->hlist);
6111                 kfree(device);
6112         }
6113
6114         kfree(device_names);
6115
6116 err_alloc_device_names_failed:
6117         debugfs_remove_recursive(binder_debugfs_dir_entry_root);
6118
6119         return ret;
6120 }
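
/*
 * Example (illustrative only): when CONFIG_ANDROID_BINDERFS is
 * disabled, the device list comes from the module parameter parsed
 * above; booting with, e.g.,
 *
 *	binder.devices=binder,hwbinder,vndbinder
 *
 * creates one misc character device per comma-separated name
 * (assuming the parameter is exposed as "devices", as set up earlier
 * in this file).
 */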
6121
6122 device_initcall(binder_init);
6123
6124 #define CREATE_TRACE_POINTS
6125 #include "binder_trace.h"
6126
6127 MODULE_LICENSE("GPL v2");